Adding AWS SDK

Dean Putney 2013-11-20 16:20:46 -08:00
parent b304e73634
commit 21218dd9a4
1886 changed files with 226401 additions and 0 deletions

composer.json (Normal file, 5 lines)

@@ -0,0 +1,5 @@
{
"require": {
"aws/aws-sdk-php": "*"
}
}
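For context, a minimal sketch of consuming this dependency after `composer install` (the credentials and region below are placeholders, not part of this commit; the autoloader is the generated vendor/autoload.php shown further down):

```php
<?php
// Assumes `composer install` has generated vendor/autoload.php.
require 'vendor/autoload.php';

use Aws\S3\S3Client;

// Placeholder credentials/region for illustration only.
$s3 = S3Client::factory(array(
    'key'    => 'YOUR-ACCESS-KEY-ID',
    'secret' => 'YOUR-SECRET-KEY',
    'region' => 'us-east-1',
));
```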

composer.lock (generated, Normal file, 237 lines)

@@ -0,0 +1,237 @@
{
"_readme": [
"This file locks the dependencies of your project to a known state",
"Read more about it at http://getcomposer.org/doc/01-basic-usage.md#composer-lock-the-lock-file"
],
"hash": "4b7982fe778c6b6fa03052eba6bd0e46",
"packages": [
{
"name": "aws/aws-sdk-php",
"version": "2.4.10",
"source": {
"type": "git",
"url": "https://github.com/aws/aws-sdk-php.git",
"reference": "b494eb871d0aa3ba3b0a47a895fb18c6226edb88"
},
"dist": {
"type": "zip",
"url": "https://api.github.com/repos/aws/aws-sdk-php/zipball/b494eb871d0aa3ba3b0a47a895fb18c6226edb88",
"reference": "b494eb871d0aa3ba3b0a47a895fb18c6226edb88",
"shasum": ""
},
"require": {
"guzzle/guzzle": "~3.7.0",
"php": ">=5.3.3"
},
"require-dev": {
"doctrine/cache": "~1.0",
"ext-openssl": "*",
"monolog/monolog": "1.4.*",
"phpunit/phpunit": "3.7.*",
"symfony/class-loader": "2.*",
"symfony/yaml": "2.*"
},
"suggest": {
"doctrine/cache": "Adds support for caching of credentials and responses",
"ext-apc": "Allows service description opcode caching, request and response caching, and credentials caching",
"ext-openssl": "Allows working with CloudFront private distributions and verifying received SNS messages",
"monolog/monolog": "Adds support for logging HTTP requests and responses",
"symfony/yaml": "Eases the ability to write manifests for creating jobs in AWS Import/Export"
},
"type": "library",
"extra": {
"branch-alias": {
"dev-master": "2.4.x-dev"
}
},
"autoload": {
"psr-0": {
"Aws": "src/"
}
},
"notification-url": "https://packagist.org/downloads/",
"license": [
"Apache-2.0"
],
"authors": [
{
"name": "Amazon Web Services",
"homepage": "http://aws.amazon.com"
}
],
"description": "AWS SDK for PHP",
"homepage": "http://aws.amazon.com/sdkforphp2",
"keywords": [
"amazon",
"aws",
"dynamodb",
"ec2",
"s3",
"sdk"
],
"time": "2013-11-14 17:24:28"
},
{
"name": "guzzle/guzzle",
"version": "v3.7.4",
"source": {
"type": "git",
"url": "https://github.com/guzzle/guzzle.git",
"reference": "b170b028c6bb5799640e46c8803015b0f9a45ed9"
},
"dist": {
"type": "zip",
"url": "https://api.github.com/repos/guzzle/guzzle/zipball/b170b028c6bb5799640e46c8803015b0f9a45ed9",
"reference": "b170b028c6bb5799640e46c8803015b0f9a45ed9",
"shasum": ""
},
"require": {
"ext-curl": "*",
"php": ">=5.3.3",
"symfony/event-dispatcher": ">=2.1"
},
"replace": {
"guzzle/batch": "self.version",
"guzzle/cache": "self.version",
"guzzle/common": "self.version",
"guzzle/http": "self.version",
"guzzle/inflection": "self.version",
"guzzle/iterator": "self.version",
"guzzle/log": "self.version",
"guzzle/parser": "self.version",
"guzzle/plugin": "self.version",
"guzzle/plugin-async": "self.version",
"guzzle/plugin-backoff": "self.version",
"guzzle/plugin-cache": "self.version",
"guzzle/plugin-cookie": "self.version",
"guzzle/plugin-curlauth": "self.version",
"guzzle/plugin-error-response": "self.version",
"guzzle/plugin-history": "self.version",
"guzzle/plugin-log": "self.version",
"guzzle/plugin-md5": "self.version",
"guzzle/plugin-mock": "self.version",
"guzzle/plugin-oauth": "self.version",
"guzzle/service": "self.version",
"guzzle/stream": "self.version"
},
"require-dev": {
"doctrine/cache": "*",
"monolog/monolog": "1.*",
"phpunit/phpunit": "3.7.*",
"psr/log": "1.0.*",
"symfony/class-loader": "*",
"zendframework/zend-cache": "2.0.*",
"zendframework/zend-log": "2.0.*"
},
"type": "library",
"extra": {
"branch-alias": {
"dev-master": "3.7-dev"
}
},
"autoload": {
"psr-0": {
"Guzzle\\Tests": "tests/",
"Guzzle": "src/"
}
},
"notification-url": "https://packagist.org/downloads/",
"license": [
"MIT"
],
"authors": [
{
"name": "Michael Dowling",
"email": "mtdowling@gmail.com",
"homepage": "https://github.com/mtdowling"
},
{
"name": "Guzzle Community",
"homepage": "https://github.com/guzzle/guzzle/contributors"
}
],
"description": "Guzzle is a PHP HTTP client library and framework for building RESTful web service clients",
"homepage": "http://guzzlephp.org/",
"keywords": [
"client",
"curl",
"framework",
"http",
"http client",
"rest",
"web service"
],
"time": "2013-10-02 20:47:00"
},
{
"name": "symfony/event-dispatcher",
"version": "v2.3.7",
"target-dir": "Symfony/Component/EventDispatcher",
"source": {
"type": "git",
"url": "https://github.com/symfony/EventDispatcher.git",
"reference": "2d8ece3c610726a73d0c95c885134efea182610e"
},
"dist": {
"type": "zip",
"url": "https://api.github.com/repos/symfony/EventDispatcher/zipball/2d8ece3c610726a73d0c95c885134efea182610e",
"reference": "2d8ece3c610726a73d0c95c885134efea182610e",
"shasum": ""
},
"require": {
"php": ">=5.3.3"
},
"require-dev": {
"symfony/dependency-injection": "~2.0"
},
"suggest": {
"symfony/dependency-injection": "",
"symfony/http-kernel": ""
},
"type": "library",
"extra": {
"branch-alias": {
"dev-master": "2.3-dev"
}
},
"autoload": {
"psr-0": {
"Symfony\\Component\\EventDispatcher\\": ""
}
},
"notification-url": "https://packagist.org/downloads/",
"license": [
"MIT"
],
"authors": [
{
"name": "Fabien Potencier",
"email": "fabien@symfony.com"
},
{
"name": "Symfony Community",
"homepage": "http://symfony.com/contributors"
}
],
"description": "Symfony EventDispatcher Component",
"homepage": "http://symfony.com",
"time": "2013-10-13 06:32:10"
}
],
"packages-dev": [
],
"aliases": [
],
"minimum-stability": "stable",
"stability-flags": [
],
"platform": [
],
"platform-dev": [
]
}

vendor/autoload.php (vendored, Normal file, 7 lines)

@@ -0,0 +1,7 @@
<?php
// autoload.php @generated by Composer
require_once __DIR__ . '/composer' . '/autoload_real.php';
return ComposerAutoloaderInitb29840e4cb34422c942582fc8c5973ae::getLoader();

vendor/aws/aws-sdk-php/.gitignore (vendored, Normal file, 18 lines)

@@ -0,0 +1,18 @@
*.phar
*.zip
build/artifacts
phpunit.xml
phpunit.functional.xml
test_services.json
Config
./Makefile
.idea
.DS_Store
.swp
.build
composer.lock
vendor
docs/_build
docs/_themes/flask/flask_theme_support.pyc
docs/_ext/aws/__init__.pyc
src/package.xml

vendor/aws/aws-sdk-php/.travis.yml (vendored, Normal file, 10 lines)

@@ -0,0 +1,10 @@
language: php
php:
- 5.3
- 5.4
- 5.5
before_script:
- sh -c 'if [ $(php -r "echo PHP_MINOR_VERSION;") -le 4 ]; then echo "extension = apc.so" >> ~/.phpenv/versions/$(phpenv version-name)/etc/php.ini; fi;'
- cp test_services.json.dist test_services.json
- composer install --dev
script: vendor/bin/phpunit

vendor/aws/aws-sdk-php/CHANGELOG.md (vendored, Normal file, 397 lines)

@@ -0,0 +1,397 @@
CHANGELOG
=========
2.4.10 (2013-11-14)
-------------------
* Added support for **AWS CloudTrail**
* Added support for identity federation using SAML 2.0 to the AWS STS client
* Added support for configuring SAML-compliant identity providers to the AWS IAM client
* Added support for event notifications to the Amazon Redshift client
* Added support for HSM storage for encryption keys to the Amazon Redshift client
* Added support for encryption key rotation to the Amazon Redshift client
* Added support for database audit logging to the Amazon Redshift client
2.4.9 (2013-11-08)
------------------
* Added support for [cross-zone load balancing](http://aws.amazon.com/about-aws/whats-new/2013/11/06/elastic-load-balancing-adds-cross-zone-load-balancing/)
to the Elastic Load Balancing client.
* Added support for a [new gateway configuration](http://aws.amazon.com/about-aws/whats-new/2013/11/05/aws-storage-gateway-announces-gateway-virtual-tape-library/),
Gateway-Virtual Tape Library, to the AWS Storage Gateway client.
* Added support for stack policies to the AWS CloudFormation client.
* Fixed issue #176 where attempting to upload directly to Amazon S3 using the `UploadBuilder` failed when using a custom
iterator that needs to be rewound.
2.4.8 (2013-10-31)
------------------
* Updated the AWS Direct Connect client
* Updated the Amazon Elastic MapReduce client to add support for new EMR APIs, termination of specific cluster
instances, and unlimited EMR steps.
2.4.7 (2013-10-17)
------------------
* Added support for audio transcoding features to the Amazon Elastic Transcoder client
* Added support for modifying Reserved Instances in a region to the Amazon EC2 client
* Added support for new resource management features to the AWS OpsWorks client
* Added support for additional HTTP methods to the Amazon CloudFront client
* Added support for custom error page configuration to the Amazon CloudFront client
* Added support for the public IP address association of instances in an Auto Scaling group via the Auto Scaling client
* Added support for tags and filters to various operations in the Amazon RDS client
* Added the ability to easily specify event listeners on waiters
* Added support for using the `ap-southeast-2` region to the Amazon Glacier client
* Added support for using the `ap-southeast-1` and `ap-southeast-2` regions to the Amazon Redshift client
* Updated the Amazon EC2 client to use the 2013-09-11 API version
* Updated the Amazon CloudFront client to use the 2013-09-27 API version
* Updated the AWS OpsWorks client to use the 2013-07-15 API version
* Updated the Amazon CloudSearch client to use Signature Version 4
* Fixed an issue with the Amazon S3 Client so that the top-level XML element of the `CompleteMultipartUpload` operation
is correctly sent as `CompleteMultipartUpload`
* Fixed an issue with the Amazon S3 Client so that you can now disable bucket logging using the `PutBucketLogging`
operation
* Fixed an issue with the Amazon CloudFront client so that query string parameters in pre-signed URLs are correctly URL-encoded
* Fixed an issue with the Signature Version 4 implementation where headers with multiple values were sometimes sorted
and signed incorrectly
2.4.6 (2013-09-12)
------------------
* Added support for modifying EC2 Reserved Instances to the Amazon EC2 client
* Added support for VPC features to the AWS OpsWorks client
* Updated the DynamoDB Session Handler to implement the SessionHandlerInterface of PHP 5.4 when available
* Updated the SNS Message Validator to throw an exception, instead of an error, when the raw post data is invalid
* Fixed an issue in the S3 signature which ensures that parameters are sorted correctly for signing
* Fixed an issue in the S3 client where the Sydney region was not allowed as a `LocationConstraint` for the
`PutObject` operation
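A rough sketch of the DynamoDB Session Handler mentioned above, assuming a configured DynamoDB client `$dynamoDb` and a pre-created `sessions` table (both names are assumptions):

```php
// Register the DynamoDB-backed session handler, then use sessions as usual.
$handler = $dynamoDb->registerSessionHandler(array(
    'table_name' => 'sessions', // hypothetical table name
));
session_start();
$_SESSION['user'] = 'example';
session_write_close();
```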
2.4.5 (2013-09-04)
------------------
* Added support for replication groups to the Amazon ElastiCache client
* Added support for using the `us-gov-west-1` region to the AWS CloudFormation client
2.4.4 (2013-08-29)
------------------
* Added support for assigning a public IP address to an instance at launch to the Amazon EC2 client
* Updated the Amazon EC2 client to use the 2013-07-15 API version
* Updated the Amazon SWF client to sign requests with Signature V4
* Updated the Instance Metadata client to allow for higher and more customizable connection timeouts
* Fixed an issue with the SDK where XML map structures were not being serialized correctly in some cases
* Fixed issue #136 where a few of the new Amazon SNS mobile push operations were not working properly
* Fixed an issue where the AWS STS `AssumeRoleWithWebIdentity` operation was requiring credentials and a signature
unnecessarily
* Fixed an issue with the `S3Client::uploadDirectory` method so that true key prefixes can be used
* [Docs] Updated the API docs to include sample code for each operation that indicates the parameter structure
* [Docs] Updated the API docs to include more information in the descriptions of operations and parameters
* [Docs] Added a page about Iterators to the user guide
2.4.3 (2013-08-12)
------------------
* Added support for mobile push notifications to the Amazon SNS client
* Added support for progress reporting on snapshot restore operations to the Amazon Redshift client
* Updated the Amazon Elastic MapReduce client to use JSON serialization
* Updated the Amazon Elastic MapReduce client to sign requests with Signature V4
* Updated the SDK to throw `Aws\Common\Exception\TransferException` exceptions when a network error occurs instead of a
`Guzzle\Http\Exception\CurlException`. The TransferException class, however, extends from
`Guzzle\Http\Exception\CurlException`. You can continue to catch the Guzzle `CurlException` or catch
`Aws\Common\Exception\AwsExceptionInterface` to catch any exception that can be thrown by an AWS client
* Fixed an issue with the Amazon S3 stream wrapper where trailing slashes were being added when listing directories
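To illustrate the exception change described above, a hedged sketch of catching network errors (bucket and key names are invented):

```php
use Aws\Common\Exception\TransferException;

try {
    $s3->getObject(array('Bucket' => 'my-bucket', 'Key' => 'my-object'));
} catch (TransferException $e) {
    // Network-level failure; TransferException still extends Guzzle's
    // CurlException, so existing catch blocks keep working.
}
```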
2.4.2 (2013-07-25)
------------------
* Added support for cross-account snapshot access control to the Amazon Redshift client
* Added support for decoding authorization messages to the AWS STS client
* Added support for checking for required permissions via the `DryRun` parameter to the Amazon EC2 client
* Added support for custom Amazon Machine Images (AMIs) and Chef 11 to the AWS OpsWorks client
* Added an SDK compatibility test to allow users to quickly determine if their system meets the requirements of the SDK
* Updated the Amazon EC2 client to use the 2013-06-15 API version
* Fixed an unmarshalling error with the Amazon EC2 `CreateKeyPair` operation
* Fixed an unmarshalling error with the Amazon S3 `ListMultipartUploads` operation
* Fixed an issue with the Amazon S3 stream wrapper "x" fopen mode
* Fixed an issue with `Aws\S3\S3Client::downloadBucket` by removing leading slashes from the passed `$keyPrefix` argument
2.4.1 (2013-07-08)
------------------
* Added support for setting watermarks and max framerates to the Amazon Elastic Transcoder client
* Added the `Aws\DynamoDb\Iterator\ItemIterator` class to make it easier to get items from the results of DynamoDB
operations in a simpler form
* Added support for the `cr1.8xlarge` EC2 instance type. Use `Aws\Ec2\Enum\InstanceType::CR1_8XLARGE`
* Added support for the suppression list SES mailbox simulator. Use `Aws\Ses\Enum\MailboxSimulator::SUPPRESSION_LIST`
* [SDK] Fixed an issue with data formats throughout the SDK due to a regression. Dates are now sent over the wire with
the correct format. This issue affected the Amazon EC2, Amazon ElastiCache, AWS Elastic Beanstalk, Amazon EMR, and
Amazon RDS clients
* Fixed an issue with the parameter serialization of the `ImportInstance` operation in the Amazon EC2 client
* Fixed an issue with the Amazon S3 client where the `RoutingRules.Redirect.HostName` parameter of the
`PutBucketWebsite` operation was erroneously marked as required
* Fixed an issue with the Amazon S3 client where the `DeleteObject` operation was missing parameters
* Fixed an issue with the Amazon S3 client where the `Status` parameter of the `PutBucketVersioning` operation did not
properly support the "Suspended" value
* Fixed an issue with the Amazon Glacier `UploadPartGenerator` class so that an exception is thrown if the provided body
to upload is less than 1 byte
* Added MD5 validation to Amazon SQS ReceiveMessage operations
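A sketch of the `ItemIterator` named above, assuming a configured DynamoDB client `$dynamoDb` and a table called `my-table`:

```php
use Aws\DynamoDb\Iterator\ItemIterator;

// Wrap a Scan iterator so each result is exposed as a simple, unmarshalled item.
$items = new ItemIterator($dynamoDb->getIterator('Scan', array('TableName' => 'my-table')));
foreach ($items as $item) {
    echo $item['id'], "\n"; // attribute access via ArrayAccess
}
```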
2.4.0 (2013-06-18)
------------------
* [BC] Updated the Amazon CloudFront client to use the new 2013-05-12 API version which includes changes in how you
configure distributions. If you are not ready to upgrade to the new API, you can configure the SDK to use the previous
version of the API by setting the `version` option to `2012-05-05` when you instantiate the client (See
[`UPGRADING.md`](https://github.com/aws/aws-sdk-php/blob/master/UPGRADING.md))
* Added abstractions for uploading a local directory to an Amazon S3 bucket (`$s3->uploadDirectory()`)
* Added abstractions for downloading an Amazon S3 bucket to local directory (`$s3->downloadBucket()`)
* Added an easy way to delete objects from an Amazon S3 bucket that match a regular expression or key prefix
* Added an easy way to upload an object to Amazon S3 that automatically uses a multipart upload if the size of the
object exceeds a customizable threshold (`$s3->upload()`)
* [SDK] Added facade classes for simple, static access to clients (e.g., `S3::putObject([...])`)
* Added the `Aws\S3\S3Client::getObjectUrl` convenience method for getting the URL of an Amazon S3 object. This works
for both public and pre-signed URLs
* Added support for using the `ap-northeast-1` region to the Amazon Redshift client
* Added support for configuring custom SSL certificates to the Amazon CloudFront client via the `ViewerCertificate`
parameter
* Added support for read replica status to the Amazon RDS client
* Added "magic" access to iterators to make using iterators more convenient (e.g., `$s3->getListBucketsIterator()`)
* Added the `waitUntilDBInstanceAvailable` and `waitUntilDBInstanceDeleted` waiters to the Amazon RDS client
* Added the `createCredentials` method to the AWS STS client to make it easier to create a credentials object from the
results of an STS operation
* Updated the Amazon RDS client to use the 2013-05-15 API version
* Updated request retrying logic to automatically refresh expired credentials and retry with new ones
* Updated the Amazon CloudFront client to sign requests with Signature V4
* Updated the Amazon SNS client to sign requests with Signature V4, which enables larger payloads
* Updated the S3 Stream Wrapper so that you can use stream resources in any S3 operation without having to manually
specify the `ContentLength` option
* Fixed issue #94 so that the `Aws\S3\BucketStyleListener` is invoked on `command.after_prepare` and presigned URLs
are generated correctly from S3 commands
* Fixed an issue so that creating presigned URLs using the Amazon S3 client now works with temporary credentials
* Fixed an issue so that the `CORSRules.AllowedHeaders` parameter is now available when configuring CORS for Amazon S3
* Set the Guzzle dependency to ~3.7.0
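As a rough illustration of the 2.4.0 S3 abstractions listed above (bucket names and paths are invented):

```php
$s3->uploadDirectory('/local/photos', 'my-bucket');               // directory -> bucket
$s3->downloadBucket('/local/backup', 'my-bucket');                // bucket -> directory
$s3->upload('my-bucket', 'big.iso', fopen('/tmp/big.iso', 'r'));  // multipart when large
echo $s3->getObjectUrl('my-bucket', 'big.iso', '+10 minutes');    // pre-signed URL
```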
2.3.4 (2013-05-30)
------------------
* Set the Guzzle dependency to ~3.6.0
2.3.3 (2013-05-28)
------------------
* Added support for web identity federation in the AWS Security Token Service (STS) API
* Fixed an issue with creating pre-signed Amazon CloudFront RTMP URLs
* Fixed issue #85 to correct the parameter serialization of NetworkInterfaces within the Amazon EC2 RequestSpotInstances
operation
2.3.2 (2013-05-15)
------------------
* Added support for doing parallel scans to the Amazon DynamoDB client
* [OpsWorks] Added support for using Elastic Load Balancer to the AWS OpsWorks client
* Added support for using EBS-backed instances to the AWS OpsWorks client along with some other minor updates
* Added support for finer-grained error messages to the AWS Data Pipeline client and updated the service description
* Added the ability to set the `key_pair_id` and `private_key` options at the time of signing a CloudFront URL instead
of when instantiating the client
* Added a new [Zip Download](http://pear.amazonwebservices.com/get/aws.zip) for installing the SDK
* Fixed the API version for the AWS Support client to be `2013-04-15`
* Fixed issue #78 by implementing `Aws\S3\StreamWrapper::stream_cast()` for the S3 stream wrapper
* Fixed issue #79 by updating the S3 `ClearBucket` object to work with the `ListObjects` operation
* Fixed issue #80 where the `ETag` was incorrectly labeled as a header value instead of being in the XML body for
the S3 `CompleteMultipartUpload` operation response
* Fixed an issue where the `setCredentials()` method did not properly update the `SignatureListener`
* Updated the required version of Guzzle to `">=3.4.3,<4"` to support Guzzle 3.5 which provides the SDK with improved
memory management
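A hedged sketch of passing the CloudFront signing options at URL-creation time, as described above (the key-pair ID and key path are placeholders):

```php
$signedUrl = $cloudFront->getSignedUrl(array(
    'url'         => 'http://example-dist.cloudfront.net/video.mp4',
    'expires'     => time() + 300,
    'key_pair_id' => 'YOUR-KEY-PAIR-ID',          // placeholder
    'private_key' => '/path/to/private-key.pem',  // placeholder
));
```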
2.3.1 (2013-04-30)
------------------
* Added support for **AWS Support**
* Added support for using the `eu-west-1` region to the Amazon Redshift client
* Fixed an issue with the Amazon RDS client where the `DownloadDBLogFilePortion` operation was not being serialized
properly
* Fixed an issue with the Amazon S3 client where the `PutObjectCopy` alias was interfering with the `CopyObject`
operation
* Added the ability to manually set a Content-Length header when using the `PutObject` and `UploadPart` operations of
the Amazon S3 client
* Fixed an issue where the Amazon S3 class was not throwing an exception for a non-followable 301 redirect response
* Fixed an issue where `fflush()` was called during the shutdown process of the stream handler for read-only streams
2.3.0 (2013-04-18)
------------------
* Added support for Local Secondary Indexes to the Amazon DynamoDB client
* [BC] Updated the Amazon DynamoDB client to use the new 2012-08-10 API version which includes changes in how you
specify keys. If you are not ready to upgrade to the new API, you can configure the SDK to use the previous version of
the API by setting the `version` option to `2011-12-05` when you instantiate the client (See
[`UPGRADING.md`](https://github.com/aws/aws-sdk-php/blob/master/UPGRADING.md)).
* Added an Amazon S3 stream wrapper that allows PHP native file functions to be used to interact with S3 buckets and
objects
* Added support for automatically retrying *throttled* requests with exponential backoff to all service clients
* Added a new config option (`version`) to client objects to specify the API version to use if multiple are supported
* Added a new config option (`gc_operation_delay`) to the DynamoDB Session Handler to specify a delay between requests
to the service during garbage collection in order to help regulate the consumption of throughput
* Added support for using the `us-west-2` region to the Amazon Redshift client
* [Docs] Added a way to use marked integration test code as example code in the user guide and API docs
* Updated the Amazon RDS client to sign requests with Signature V4
* Updated the Amazon S3 client to automatically add the `Content-Type` to `PutObject` and other upload operations
* Fixed an issue where service clients with a global endpoint could have their region for signing set incorrectly if a
region other than `us-east-1` was specified.
* Fixed an issue where reused command objects appended duplicate content to the user agent string
* [SDK] Fixed an issue in a few operations (including `SQS::receiveMessage`) where the `curl.options` could not be
modified
* [Docs] Added key information to the DynamoDB service description to provide more accurate API docs for some operations
* [Docs] Added a page about Waiters to the user guide
* [Docs] Added a page about the DynamoDB Session Handler to the user guide
* [Docs] Added a page about response Models to the user guide
* Bumped the required version of Guzzle to ~3.4.1
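A minimal sketch of the stream wrapper introduced above, assuming a configured client `$s3` and an existing bucket:

```php
// Register the s3:// protocol, then use native PHP file functions against S3.
$s3->registerStreamWrapper();
file_put_contents('s3://my-bucket/notes.txt', 'hello');
echo file_get_contents('s3://my-bucket/notes.txt');
```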
2.2.1 (2013-03-18)
------------------
* Added support for viewing and downloading DB log files to the Amazon RDS client
* Added the ability to validate incoming Amazon SNS messages. See the `Aws\Sns\MessageValidator` namespace
* Added the ability to easily change the credentials that a client is configured to use via `$client->setCredentials()`
* Added the `client.region_changed` and `client.credentials_changed` events on the client that are triggered when the
`setRegion()` and `setCredentials()` methods are called, respectively
* Added support for using the `ap-southeast-2` region with the Amazon ElastiCache client
* Added support for using the `us-gov-west-1` region with the Amazon SWF client
* Updated the Amazon RDS client to use the 2013-02-12 API version
* Fixed an issue in the Amazon EC2 service description that was affecting the use of the new `ModifyVpcAttribute` and
`DescribeVpcAttribute` operations
* Added `ObjectURL` to the output of an Amazon S3 PutObject operation so that you can more easily retrieve the URL of an
object after uploading
* Added a `createPresignedUrl()` method to any command object created by the Amazon S3 client to more easily create
presigned URLs
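A sketch of the `createPresignedUrl()` command helper named above (bucket and key invented):

```php
$command = $s3->getCommand('GetObject', array(
    'Bucket' => 'my-bucket',
    'Key'    => 'my-object',
));
$url = $command->createPresignedUrl('+10 minutes');
```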
2.2.0 (2013-03-11)
------------------
* Added support for **Amazon Elastic MapReduce (Amazon EMR)**
* Added support for **AWS Direct Connect**
* Added support for **Amazon ElastiCache**
* Added support for **AWS Storage Gateway**
* Added support for **AWS Import/Export**
* Added support for **AWS CloudFormation**
* Added support for **Amazon CloudSearch**
* Added support for [provisioned IOPS](http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.ProvisionedIOPS.html)
to the Amazon RDS client
* Added support for promoting [read replicas](http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_ReadRepl.html)
to the Amazon RDS client
* Added support for [event notification subscriptions](http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Events.html)
to the Amazon RDS client
* Added support for enabling/disabling DNS Hostnames and DNS Resolution in Amazon VPC to the Amazon EC2 client
* Added support for enumerating account attributes to the Amazon EC2 client
* Added support for copying AMIs across regions to the Amazon EC2 client
* Added the ability to get a Waiter object from a client using the `getWaiter()` method
* [SDK] Added the ability to load credentials from the environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_KEY`.
This is compatible with AWS Elastic Beanstalk environment configurations
* Added support for using the us-west-1, us-west-2, eu-west-1, and ap-southeast-1 regions with Amazon CloudSearch
* Updated the Amazon RDS client to use the 2013-01-10 API version
* Updated the Amazon EC2 client to use the 2013-02-01 API version
* Added support for using SecurityToken with signature version 2 services
* Added the client User-Agent header to exception messages for easier debugging
* Added an easier way to disable operation parameter validation by setting `validation` to false when creating clients
* Added the ability to disable the exponential backoff plugin
* Added the ability to easily fetch the region name that a client is configured to use via `$client->getRegion()`
* Added end-user guides available at http://docs.aws.amazon.com/aws-sdk-php/guide/latest/
* Fixed issue #48 where signing Amazon S3 requests with null or empty metadata resulted in a signature error
* Fixed issue #29 where Amazon S3 was intermittently closing a connection
* Updated the Amazon S3 client to parse the AcceptRanges header for HeadObject and GetObject output
* Updated the Amazon Glacier client to allow the `saveAs` parameter to be specified as an alias for `command.response_body`
* Various performance improvements throughout the SDK
* Removed endpoint providers and now placing service region information directly in service descriptions
* Removed client resolvers when creating clients in a client's factory method (this should not have any impact to end users)
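A brief sketch of using a waiter, related to the `getWaiter()` item above; `waitUntil()` is the one-call convenience form (bucket name invented):

```php
// Block until the bucket exists, polling with the SDK's default backoff.
$s3->waitUntil('BucketExists', array('Bucket' => 'my-bucket'));
```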
2.1.2 (2013-02-18)
------------------
* Added support for **AWS OpsWorks**
2.1.1 (2013-02-15)
------------------
* Added support for **Amazon Redshift**
* Added support for **Amazon Simple Queue Service (Amazon SQS)**
* Added support for **Amazon Simple Notification Service (Amazon SNS)**
* Added support for **Amazon Simple Email Service (Amazon SES)**
* Added support for **Auto Scaling**
* Added support for **Amazon CloudWatch**
* Added support for **Amazon Simple Workflow Service (Amazon SWF)**
* Added support for **Amazon Relational Database Service (Amazon RDS)**
* Added support for health checks and failover in Amazon Route 53
* Updated the Amazon Route 53 client to use the 2012-12-12 API version
* Updated `AbstractWaiter` to dispatch `waiter.before_attempt` and `waiter.before_wait` events
* Updated `CallableWaiter` to allow for an array of context data to be passed to the callable
* Fixed issue #29 so that the stat cache is cleared before performing multipart uploads
* Fixed issue #38 so that Amazon CloudFront URLs are signed properly
* Fixed an issue with Amazon S3 website redirects
* Fixed a URL encoding inconsistency with Amazon S3 and pre-signed URLs
* Fixed issue #42 to eliminate cURL error 65 for JSON services
* Set Guzzle dependency to [~3.2.0](https://github.com/guzzle/guzzle/blob/master/CHANGELOG.md#320-2013-02-14)
* Minimum version of PHP is now 5.3.3
2.1.0 (2013-01-28)
------------------
* Waiters now require an associative array as input for the underlying operation performed by a waiter. See
`UPGRADING.md` for details.
* Added support for **Amazon Elastic Compute Cloud (Amazon EC2)**
* Added support for **Amazon Elastic Transcoder**
* Added support for **Amazon SimpleDB**
* Added support for **Elastic Load Balancing**
* Added support for **AWS Elastic Beanstalk**
* Added support for **AWS Identity and Access Management (IAM)**
* Added support for Amazon S3 website redirection rules
* Added support for the `RetrieveByteRange` parameter of the `InitiateJob` operation in Amazon Glacier
* Added support for Signature Version 2
* Clients now gain more information from service descriptions rather than client factory methods
* Service descriptions are now versioned for clients
* Fixed an issue where Amazon S3 did not use "restore" as a signable resource
* Fixed an issue with Amazon S3 where `x-amz-meta-*` headers were not properly added with the CopyObject operation
* Fixed an issue where the Amazon Glacier client was not using the correct User-Agent header
* Fixed issue #13 in which constants defined by referencing other constants caused errors with early versions of PHP 5.3
2.0.3 (2012-12-20)
------------------
* Added support for **AWS Data Pipeline**
* Added support for **Amazon Route 53**
* Fixed an issue with the Amazon S3 client where object keys with slashes were causing errors
* Added a `SaveAs` parameter to the Amazon S3 `GetObject` operation to allow saving the object directly to a file
* Refactored iterators to remove code duplication and ease creation of future iterators
2.0.2 (2012-12-10)
------------------
* Fixed an issue with the Amazon S3 client where non-DNS compatible buckets were causing a signature
mismatch error
* Fixed an issue with the service description for the Amazon S3 `UploadPart` operation so that it works correctly
* Fixed an issue with the Amazon S3 service description dealing with `response-*` query parameters of `GetObject`
* Fixed an issue with the Amazon S3 client where object keys prefixed by the bucket name were being treated incorrectly
* Fixed an issue with `Aws\S3\Model\MultipartUpload\ParallelTransfer` class
* Added support for the `AssumeRole` operation for AWS STS
* Added the `UploadBodyListener` which allows upload operations in Amazon S3 and Amazon Glacier to accept file handles
in the `Body` parameter and file paths in the `SourceFile` parameter
* Added Content-Type guessing for uploads
* Added new region endpoints, including sa-east-1 and us-gov-west-1 for Amazon DynamoDB
* Added methods to `Aws\S3\Model\MultipartUpload\UploadBuilder` class to make setting ACL and Content-Type easier
2.0.1 (2012-11-13)
------------------
* Fixed a signature issue encountered when a request to Amazon S3 is redirected
* Added support for archiving Amazon S3 objects to Amazon Glacier
* Added CRC32 validation of Amazon DynamoDB responses
* Added ConsistentRead support to the `BatchGetItem` operation of Amazon DynamoDB
* Added new region endpoints, including Sydney
2.0.0 (2012-11-02)
------------------
* Initial release of the AWS SDK for PHP Version 2. See <http://aws.amazon.com/sdkforphp2/> for more information.
* Added support for **Amazon Simple Storage Service (Amazon S3)**
* Added support for **Amazon DynamoDB**
* Added support for **Amazon Glacier**
* Added support for **Amazon CloudFront**
* Added support for **AWS Security Token Service (AWS STS)**

vendor/aws/aws-sdk-php/CONTRIBUTING.md (vendored, Normal file, 80 lines)

@@ -0,0 +1,80 @@
# Contributing to the AWS SDK for PHP
We work hard to provide a high-quality and useful SDK, and we greatly value feedback and contributions from our
community. Whether it's a new feature, correction, or additional documentation, we welcome your pull requests.
Please submit any [issues][] or [pull requests][pull-requests] through GitHub.
## What you should keep in mind
1. The SDK is released under the [Apache license][license]. Any code you submit will be released under that license. For
substantial contributions, we may ask you to sign a [Contributor License Agreement (CLA)][cla].
2. We follow the [PSR-0][], [PSR-1][], and [PSR-2][] recommendations from the [PHP Framework Interop Group][php-fig].
Please submit code that follows these standards. The [PHP CS Fixer][cs-fixer] tool can be helpful for formatting your
code.
3. We maintain a high percentage of code coverage in our unit tests. If you make changes to the code, please add,
update, and/or remove unit (and integration) tests as appropriate.
4. We may choose not to accept pull requests that change service descriptions (e.g., files like
`src/Aws/OpsWorks/Resources/opsworks-2013-02-18.php`). We generate these files based on our internal knowledge of
the AWS services. If there is something incorrect with or missing from a service description, it may be more
appropriate to [submit an issue][issues]. We *will*, however, consider pull requests affecting service descriptions,
if the changes are related to **Iterator** or **Waiter** configurations (e.g. [PR #84][pr-84]).
5. If your code does not conform to the PSR standards or does not include adequate tests, we may ask you to update your
pull requests before we accept them. We also reserve the right to deny any pull requests that do not align with our
standards or goals.
6. If you would like to implement support for a significant feature that is not yet available in the SDK, please talk to
us beforehand to avoid any duplication of effort.
## What we are looking for
We are open to anything that improves the SDK and doesn't unnecessarily cause backwards-incompatible changes. If you are
unsure if your idea is something we would be open to, please ask us (open a ticket, send us an email, post on the
forums, etc.). Specifically, here are a few things that we would appreciate help on:
1. **Waiters** Waiter configurations are located in the service descriptions. You can also create concrete waiters
within the `Aws\*\Waiter` namespace of a service if the logic of the waiter absolutely cannot be defined using waiter
configuration. There are many waiters that we currently provide, but many that we do not. Please let us know if you
have any questions about creating waiter configurations.
2. **Docs** Our [User Guide][user-guide] is an ongoing project, and we would greatly appreciate contributions. The
docs are written as a [Sphinx][] website using [reStructuredText][] (very similar to Markdown). The User Guide is
located in the `docs` directory of this repository. Please see the [User Guide README][docs-readme] for more
information about how to build the User Guide.
3. **Tests** We maintain high code coverage, but if there are any tests you feel are missing, please add them.
4. **Convenience features** Are there any features you feel would add value to the SDK (e.g., batching for SES, SNS
message verification, S3 stream wrapper, etc.)? Contributions in this area would be greatly appreciated.
5. **Third-party modules** We have modules published for [Silex][mod-silex], [Laravel 4][mod-laravel], and [Zend
Framework 2][mod-zf2]. Please let us know if you are interested in creating integrations with other frameworks. We
would be happy to help.
6. If you have some other ideas, please let us know!
## Running the unit tests
The AWS SDK for PHP is unit tested using PHPUnit. You can run the unit tests of the SDK after copying
`phpunit.xml.dist` to `phpunit.xml`:

    cp phpunit.xml.dist phpunit.xml

Next, you need to install the dependencies of the SDK using Composer:

    composer.phar install

Now you're ready to run the unit tests using PHPUnit:

    vendor/bin/phpunit
[issues]: https://github.com/aws/aws-sdk-php/issues
[pull-requests]: https://github.com/aws/aws-sdk-php/pulls
[license]: http://aws.amazon.com/apache2.0/
[cla]: http://en.wikipedia.org/wiki/Contributor_License_Agreement
[psr-0]: https://github.com/php-fig/fig-standards/blob/master/accepted/PSR-0.md
[psr-1]: https://github.com/php-fig/fig-standards/blob/master/accepted/PSR-1-basic-coding-standard.md
[psr-2]: https://github.com/php-fig/fig-standards/blob/master/accepted/PSR-2-coding-style-guide.md
[php-fig]: http://php-fig.org
[cs-fixer]: http://cs.sensiolabs.org/
[user-guide]: http://docs.aws.amazon.com/aws-sdk-php/guide/latest/index.html
[sphinx]: http://sphinx-doc.org/
[restructuredtext]: http://sphinx-doc.org/rest.html
[docs-readme]: https://github.com/aws/aws-sdk-php/blob/master/docs/README.md
[mod-silex]: https://github.com/aws/aws-sdk-php-silex
[mod-laravel]: https://github.com/aws/aws-sdk-php-laravel
[mod-zf2]: https://github.com/aws/aws-sdk-php-zf2
[pr-84]: https://github.com/aws/aws-sdk-php/pull/84

vendor/aws/aws-sdk-php/LICENSE.md (vendored, Normal file, 141 lines)

@@ -0,0 +1,141 @@
# Apache License
Version 2.0, January 2004
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
## 1. Definitions.
"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1
through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the
License.
"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled
by, or are under common control with that entity. For the purposes of this definition, "control" means
(i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract
or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial
ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications, including but not limited to software
source code, documentation source, and configuration files.
"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form,
including but not limited to compiled object code, generated documentation, and conversions to other media
types.
"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License,
as indicated by a copyright notice that is included in or attached to the work (an example is provided in the
Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from)
the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent,
as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not
include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work
and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including the original version of the Work and any
modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to
Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to
submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of
electronic, verbal, or written communication sent to the Licensor or its representatives, including but not
limited to communication on electronic mailing lists, source code control systems, and issue tracking systems
that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise designated in writing by the copyright
owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been
received by Licensor and subsequently incorporated within the Work.
## 2. Grant of Copyright License.
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare
Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such
Derivative Works in Source or Object form.
## 3. Grant of Patent License.
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent
license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such
license applies only to those patent claims licensable by such Contributor that are necessarily infringed by
their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such
Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim
or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work
constitutes direct or contributory patent infringement, then any patent licenses granted to You under this
License for that Work shall terminate as of the date such litigation is filed.
## 4. Redistribution.
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You meet the following conditions:
1. You must give any other recipients of the Work or Derivative Works a copy of this License; and
2. You must cause any modified files to carry prominent notices stating that You changed the files; and
3. You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent,
trademark, and attribution notices from the Source form of the Work, excluding those notices that do
not pertain to any part of the Derivative Works; and
4. If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that
You distribute must include a readable copy of the attribution notices contained within such NOTICE
file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed as part of the Derivative Works; within
the Source form or documentation, if provided along with the Derivative Works; or, within a display
generated by the Derivative Works, if and wherever such third-party notices normally appear. The
contents of the NOTICE file are for informational purposes only and do not modify the License. You may
add Your own attribution notices within Derivative Works that You distribute, alongside or as an
addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be
construed as modifying the License.
You may add Your own copyright statement to Your modifications and may provide additional or different license
terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative
Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the
conditions stated in this License.
## 5. Submission of Contributions.
Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by
You to the Licensor shall be under the terms and conditions of this License, without any additional terms or
conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate
license agreement you may have executed with Licensor regarding such Contributions.
## 6. Trademarks.
This License does not grant permission to use the trade names, trademarks, service marks, or product names of
the Licensor, except as required for reasonable and customary use in describing the origin of the Work and
reproducing the content of the NOTICE file.
## 7. Disclaimer of Warranty.
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor
provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT,
MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of
permissions under this License.
## 8. Limitation of Liability.
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless
required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any
Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential
damages of any character arising as a result of this License or out of the use or inability to use the Work
(including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or
any and all other commercial damages or losses), even if such Contributor has been advised of the possibility
of such damages.
## 9. Accepting Warranty or Additional Liability.
While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for,
acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole
responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold
each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS

vendor/aws/aws-sdk-php/NOTICE.md (vendored, Normal file, 112 lines)

@@ -0,0 +1,112 @@
# AWS SDK for PHP
<http://aws.amazon.com/php>
Copyright 2010-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
<http://aws.amazon.com/apache2.0>
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
# Guzzle
<https://github.com/guzzle/guzzle>
Copyright (c) 2011 Michael Dowling, https://github.com/mtdowling
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
# Symfony
<https://github.com/symfony/symfony>
Copyright (c) 2004-2012 Fabien Potencier
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is furnished
to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
# Doctrine Common
<https://github.com/doctrine/common>
Copyright (c) 2006-2012 Doctrine Project
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
# Monolog
<https://github.com/Seldaek/monolog>
Copyright (c) Jordi Boggiano
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is furnished
to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

vendor/aws/aws-sdk-php/README.md (vendored, Normal file, 144 lines)

@@ -0,0 +1,144 @@
# AWS SDK for PHP
[![Latest Stable Version](https://poser.pugx.org/aws/aws-sdk-php/version.png)](https://packagist.org/packages/aws/aws-sdk-php)
[![Total Downloads](https://poser.pugx.org/aws/aws-sdk-php/d/total.png)](https://packagist.org/packages/aws/aws-sdk-php)
[![Build Status](https://travis-ci.org/aws/aws-sdk-php.png)](https://travis-ci.org/aws/aws-sdk-php)
The **AWS SDK for PHP** enables PHP developers to easily work with [Amazon Web Services][aws] and build scalable
solutions with Amazon S3, Amazon DynamoDB, Amazon Glacier, and more. You can get started in minutes by [installing the
SDK through Composer][docs-installation] or by downloading a single [zip][install-zip] or [phar][install-phar] file.
* **Install**:
* [Instructions][docs-installation]
* [Packagist/Composer][install-packagist]
* Download [Zip][install-zip] or [Phar][install-phar]
* [PEAR Package][install-pear]
* **Docs**:
* [User Guide][docs-guide]
* [API Docs][docs-api]
* [Apache 2.0 License][sdk-license]
* **Community**:
* [AWS PHP Development Blog][sdk-blog]
* [AWS PHP Development Forum][sdk-forum]
* [GitHub Issues][sdk-issues]
* [Contribution Guide][docs-contribution]
## Features
* Provides easy-to-use HTTP clients for all supported AWS services, regions, and authentication protocols.
* Built for PHP 5.3.3+ and is compliant with [PSR-0][], [PSR-1][], and [PSR-2][].
* Easy to install through [Composer][install-packagist], [PEAR][install-pear], or single download ([zip][install-zip] or
[phar][install-phar]).
* Built on [Guzzle][] and utilizes many of its features including persistent connections, parallel requests, events and
plugins (via [Symfony2 EventDispatcher][symfony2-events]), service descriptions, [over-the-wire
logging][docs-wire-logging], caching, flexible batching, and request retrying with truncated exponential backoff.
* Convenience features including [Iterators][docs-iterators], [Waiters][docs-waiters], and [modelled
responses][docs-models].
* Upload directories to and download directories from Amazon S3.
* Multipart uploader for Amazon S3 and Amazon Glacier that can be paused and resumed.
* [DynamoDB Session Handler][docs-ddbsh] for easily scaling sessions.
* Automatically uses [IAM Instance Profile Credentials][aws-iam-credentials] on configured Amazon EC2 instances.
## Getting Started
1. **Sign up for AWS** Before you begin, you need an AWS account. Please see the [Signing Up for AWS][docs-signup]
section of the user guide for information about how to create an AWS account and retrieve your AWS credentials.
1. **Minimum requirements** To run the SDK you will need **PHP 5.3.3+** compiled with the cURL extension and cURL
7.16.2+ compiled with OpenSSL and zlib. For more information about the requirements and optimum settings for the SDK,
please see the [Requirements][docs-requirements] section of the user guide.
1. **Install the SDK** Using [Composer][] is the recommended way to install the AWS SDK for PHP. The SDK is available
via [Packagist][] under the [`aws/aws-sdk-php`][install-packagist] package. Please see the
[Installation][docs-installation] section of the user guide for more detailed information about installing the SDK
through Composer and other means (e.g., [Phar][install-phar], [Zip][install-zip], [PEAR][install-pear]).
1. **Using the SDK** The best way to become familiar with how to use the SDK is to read the [User Guide][docs-guide].
The [Quick Start Guide][docs-quickstart] will help you become familiar with the basic concepts, and there are also
specific guides for each of the [supported services][docs-services].
## Quick Example
### Upload a File to Amazon S3
```php
<?php
require 'vendor/autoload.php';
use Aws\Common\Aws;
use Aws\S3\Exception\S3Exception;
// Instantiate an S3 client
$s3 = Aws::factory('/path/to/config.php')->get('s3');
// Upload a publicly accessible file. The file size, file type, and MD5 hash are automatically calculated by the SDK
try {
$s3->putObject(array(
'Bucket' => 'my-bucket',
'Key' => 'my-object',
'Body' => fopen('/path/to/file', 'r'),
'ACL' => 'public-read',
));
} catch (S3Exception $e) {
echo "There was an error uploading the file.\n";
}
```
You can also use the even easier `upload` method.
```php
try {
$s3->upload('my-bucket', 'my-object', fopen('/path/to/file', 'r'), 'public-read');
} catch (S3Exception $e) {
echo "There was an error uploading the file.\n";
}
```
### More Examples
* [Get an object from Amazon S3 and save it to a file][example-s3-getobject]
* [Upload a large file to Amazon S3 in parts][example-s3-multipart]
* [Put an item in your Amazon DynamoDB table][example-dynamodb-putitem]
* [Send a message to your Amazon SQS queue][example-sqs-sendmessage]
[sdk-website]: http://aws.amazon.com/sdkforphp
[sdk-forum]: https://forums.aws.amazon.com/forum.jspa?forumID=80
[sdk-issues]: https://github.com/aws/aws-sdk-php/issues
[sdk-license]: http://aws.amazon.com/apache2.0/
[sdk-blog]: http://blogs.aws.amazon.com/php
[install-packagist]: https://packagist.org/packages/aws/aws-sdk-php
[install-phar]: http://pear.amazonwebservices.com/get/aws.phar
[install-zip]: http://pear.amazonwebservices.com/get/aws.zip
[install-pear]: http://pear.amazonwebservices.com
[docs-api]: http://docs.aws.amazon.com/aws-sdk-php/latest/index.html
[docs-guide]: http://docs.aws.amazon.com/aws-sdk-php/guide/latest/index.html
[docs-contribution]: https://github.com/aws/aws-sdk-php/blob/master/CONTRIBUTING.md
[docs-performance]: http://docs.aws.amazon.com/aws-sdk-php/guide/latest/performance.html
[docs-migration]: http://docs.aws.amazon.com/aws-sdk-php/guide/latest/migration-guide.html
[docs-signup]: http://docs.aws.amazon.com/aws-sdk-php/guide/latest/awssignup.html
[docs-requirements]: http://docs.aws.amazon.com/aws-sdk-php/guide/latest/requirements.html
[docs-installation]: http://docs.aws.amazon.com/aws-sdk-php/guide/latest/installation.html
[docs-quickstart]: http://docs.aws.amazon.com/aws-sdk-php/guide/latest/quick-start.html
[docs-iterators]: http://docs.aws.amazon.com/aws-sdk-php/guide/latest/quick-start.html#iterators
[docs-waiters]: http://docs.aws.amazon.com/aws-sdk-php/guide/latest/feature-waiters.html
[docs-models]: http://docs.aws.amazon.com/aws-sdk-php/guide/latest/feature-models.html
[docs-exceptions]: http://docs.aws.amazon.com/aws-sdk-php/guide/latest/quick-start.html#error-handling
[docs-wire-logging]: http://docs.aws.amazon.com/aws-sdk-php/guide/latest/faq.html#how-can-i-see-what-data-is-sent-over-the-wire
[docs-services]: http://docs.aws.amazon.com/aws-sdk-php/guide/latest/index.html#supported-services
[docs-ddbsh]: http://docs.aws.amazon.com/aws-sdk-php/guide/latest/feature-dynamodb-session-handler.html
[aws]: http://aws.amazon.com/
[aws-iam-credentials]: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UsingIAM.html#UsingIAMrolesWithAmazonEC2Instances
[guzzle]: http://guzzlephp.org
[composer]: http://getcomposer.org
[packagist]: http://packagist.org
[psr-0]: https://github.com/php-fig/fig-standards/blob/master/accepted/PSR-0.md
[psr-1]: https://github.com/php-fig/fig-standards/blob/master/accepted/PSR-1-basic-coding-standard.md
[psr-2]: https://github.com/php-fig/fig-standards/blob/master/accepted/PSR-2-coding-style-guide.md
[symfony2-events]: http://symfony.com/doc/2.0/components/event_dispatcher/introduction.html
[example-sqs-sendmessage]: http://docs.aws.amazon.com/aws-sdk-php/guide/latest/service-sqs.html#sending-messages
[example-s3-getobject]: http://docs.aws.amazon.com/aws-sdk-php/guide/latest/service-s3.html#saving-objects-to-a-file
[example-s3-multipart]: http://docs.aws.amazon.com/aws-sdk-php/guide/latest/service-s3.html#uploading-large-files-using-multipart-uploads
[example-dynamodb-putitem]: http://docs.aws.amazon.com/aws-sdk-php/guide/latest/service-dynamodb.html#adding-items

114
vendor/aws/aws-sdk-php/UPGRADING.md vendored Normal file
View File

@ -0,0 +1,114 @@
Upgrading Guide
===============
Upgrade from 2.3 to 2.4
-----------------------
### Amazon CloudFront Client
The new 2013-05-12 API version of Amazon CloudFront includes support for custom SSL certificates via the
`ViewerCertificate` parameter, but also introduces breaking changes to the API. Version 2.4 of the SDK now ships with
two versions of the Amazon CloudFront service description, one for the new 2013-05-12 API and one for the next most
recent 2012-05-05 API. The SDK defaults to using the newest API version, so CloudFront users may experience a breaking
change to their projects when upgrading. This can be avoided by switching back to the 2012-05-05 API using the
`version` option when instantiating the CloudFront client, as in the sketch below.
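A minimal sketch, mirroring the DynamoDB example in the next section (the key and secret values are placeholders):
```php
use Aws\CloudFront\CloudFrontClient;

// Pin the client to the older 2012-05-05 CloudFront API
$client = CloudFrontClient::factory(array(
    'key'     => '<aws access key>',
    'secret'  => '<aws secret key>',
    'version' => '2012-05-05'
));
```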
### Guzzle 3.7
Version 2.4 of the AWS SDK for PHP requires at least version 3.7 of Guzzle.
Upgrade from 2.2 to 2.3
-----------------------
### Amazon DynamoDB Client
The newly released 2012-08-10 API version of the Amazon DynamoDB service includes the new Local Secondary Indexes
feature, but also introduces breaking changes to the API. The most notable change is in the way that you specify keys
when creating tables and retrieving items. Version 2.3 of the SDK now ships with two versions of the DynamoDB service
description, one for the new 2012-08-10 API and one for the next most recent 2011-12-05 API. The SDK defaults to using
the newest API version, so DynamoDB users may experience a breaking change to their projects when upgrading. This can be
fixed by switching back to the 2011-12-05 API using the new `version` configuration setting when instantiating
the DynamoDB client.
```php
use Aws\DynamoDb\DynamoDbClient;
$client = DynamoDbClient::factory(array(
'key' => '<aws access key>',
'secret' => '<aws secret key>',
'region' => '<region name>',
'version' => '2011-12-05'
));
```
If you are using a config file with `Aws\Common\Aws`, then you can modify your file as follows.
```json
{
"includes": ["_aws"],
"services": {
"default_settings": {
"params": {
"key": "<aws access key>",
"secret": "<aws secret key>",
"region": "<region name>"
}
},
"dynamodb": {
"extends": "dynamodb",
"params": {
"version": "2011-12-05"
}
}
}
}
```
The [SDK user guide](http://docs.aws.amazon.com/aws-sdk-php/guide/latest/index.html) provides guides and examples for
both versions of the API.
### Guzzle 3.4.1
Version 2.3 of the AWS SDK for PHP requires at least version 3.4.1 of Guzzle.
Upgrade from 2.1 to 2.2
-----------------------
### Full Service Coverage
The AWS SDK for PHP now supports the full set of AWS services.
### Guzzle 3.3
Version 2.2 of the AWS SDK for PHP requires at least version 3.3 of Guzzle.
Upgrade from 2.0 to 2.1
-----------------------
### General
Service descriptions are now versioned under the Resources/ directory of each client.
### Waiters
Waiters now require an associative array as input for the underlying operation performed by a waiter. The configuration
system for waiters under 2.0.x utilized strings to determine the parameters used to create an operation. For example,
when waiting for an object to exist with Amazon S3, you would pass a string containing the bucket name concatenated
with the object name using a '/' separator (e.g. 'foo/baz'). In the 2.1 release, these parameters are now more
explicitly tied to the underlying operation utilized by a waiter. For example, to use the ObjectExists waiter of
Amazon S3, pass an associative array of `array('Bucket' => 'foo', 'Key' => 'baz')`. These options match the option names
and rules associated with the HeadObject operation performed by the waiter. The API documentation of each client
describes the waiters associated with the client and what underlying operation is responsible for waiting on the
resource. Waiter-specific options, like the maximum number of attempts (`max_attempts`) or the interval to wait between
retries (`interval`), can be specified in the same configuration array by prefixing the keys with `waiter.`.
Waiters can also be invoked using magic methods on the client. These magic methods are listed in each client's docblock
using `@method` tags.
```php
$s3Client->waitUntilObjectExists(array(
'Bucket' => 'foo',
'Key' => 'bar',
'waiter.max_attempts' => 3
));
```

254
vendor/aws/aws-sdk-php/build.xml vendored Normal file
View File

@ -0,0 +1,254 @@
<?xml version="1.0" encoding="UTF-8"?>
<project name="aws-sdk-for-php" default="test">
<property name="dir.output" value="${project.basedir}/build/artifacts" />
<property name="coverage" value="false" />
<property name="mock" value="false" />
<property name="min" value="false" />
<property name="sdk_url" value="http://aws.amazon.com/sdkforphp2" />
<fileset id="src_files" dir="${project.basedir}/src" includes="**/*.php" />
<target name="test" description="Run unit tests" depends="test-init">
<exec passthru="true" command="vendor/bin/phpunit" />
</target>
<target name="integration" description="Run integration tests">
<if>
<isset property="service" />
<then>
<property name="testpath" value="tests/Aws/Tests/${service}" />
</then>
<else>
<property name="testpath" value="" />
</else>
</if>
<if>
<available file="phpunit.functional.xml" />
<then>
<if>
<equals arg1="${mock}" arg2="true" />
<then>
<echo>php -d mock=true `which phpunit` -c phpunit.functional.xml</echo>
<exec passthru="true" command="php -d mock=true `which phpunit` -c phpunit.functional.xml ${testpath}" />
</then>
<else>
<exec passthru="true" command="phpunit -c phpunit.functional.xml ${testpath}" />
</else>
</if>
</then>
<else>
<fail>You must copy phpunit.functional.xml.dist to phpunit.functional.xml and modify the appropriate property settings</fail>
</else>
</if>
</target>
<target name="create-staging" description="Creates a staging directory for zip and phar creation">
<delete dir="${dir.output}/staging" failonerror="false" quiet="true"/>
<mkdir dir="${dir.output}/staging"/>
<mkdir dir="${dir.output}/staging/Aws"/>
<mkdir dir="${dir.output}/staging/Guzzle"/>
<mkdir dir="${dir.output}/staging/Doctrine/Common/Cache"/>
<mkdir dir="${dir.output}/staging/Symfony"/>
<mkdir dir="${dir.output}/staging/Monolog"/>
<patternset id="sdk-files">
<include name="**/*.php" />
<include name="**/*.pem" />
<include name="**/*.md5" />
<include name="**/LICENSE*" />
</patternset>
<!-- Copy AWS deps -->
<copy file="${project.basedir}/build/aws-autoloader.php" tofile="${dir.output}/staging/aws-autoloader.php"/>
<copy todir="${dir.output}/staging">
<fileset dir="src">
<patternset refid="sdk-files"/>
</fileset>
</copy>
<copy file="${project.basedir}/LICENSE.md" tofile="${dir.output}/staging/Aws/LICENSE.md"/>
<copy file="${project.basedir}/NOTICE.md" tofile="${dir.output}/staging/Aws/NOTICE.md"/>
<!-- Copy Symfony dependencies -->
<copy todir="${dir.output}/staging">
<fileset dir="vendor/symfony/event-dispatcher">
<include name="**/*.php" />
</fileset>
</copy>
<copy todir="${dir.output}/staging">
<fileset dir="vendor/symfony/class-loader">
<include name="**/*.php" />
</fileset>
</copy>
<!-- Copy Guzzle deps -->
<copy todir="${dir.output}/staging">
<fileset dir="vendor/guzzle/guzzle/src">
<patternset refid="sdk-files"/>
</fileset>
</copy>
<!-- Copy Monolog deps -->
<copy todir="${dir.output}/staging">
<fileset dir="vendor/monolog/monolog/src">
<patternset refid="sdk-files"/>
</fileset>
</copy>
<!-- Copy PSR deps -->
<copy todir="${dir.output}/staging">
<fileset dir="vendor/psr/log">
<include name="**/*.php" />
</fileset>
</copy>
<!-- Copy Doctrine deps -->
<copy todir="${dir.output}/staging">
<fileset dir="vendor/doctrine/cache/lib">
<patternset refid="sdk-files"/>
</fileset>
</copy>
</target>
<target name="phar" depends="create-staging" description="Create a phar with an autoloader">
<pharpackage destfile="build/aws.phar" stub="build/phar-stub.php" basedir="${dir.output}/staging">
<fileset dir="${dir.output}/staging">
<include name="**/**"/>
</fileset>
<metadata>
<element name="link" value="${sdk_url}" />
</metadata>
</pharpackage>
</target>
<target name="zip" depends="create-staging" description="Create a ZIP file containing the SDK and its dependencies">
<zip destfile="build/aws.zip" basedir="${dir.output}/staging">
<fileset dir="${dir.output}/staging">
<include name="**/**"/>
</fileset>
</zip>
</target>
<target name="test-init" description="Initialize test dependencies">
<copy file="phpunit.xml.dist" tofile="phpunit.xml" overwrite="false" />
<copy file="phpunit.functional.xml.dist" tofile="phpunit.functional.xml" overwrite="false" />
<copy file="test_services.json.dist" tofile="test_services.json" overwrite="false" />
</target>
<target name="clean" description="Deletes build artifacts">
<delete dir="${dir.output}"/>
</target>
<target name="prepare" depends="clean,test-init">
<mkdir dir="${dir.output}"/>
<mkdir dir="${dir.output}/logs" />
</target>
<target name="clean-dependencies" description="Deletes all dependencies downloaded by Composer">
<delete dir="${project.basedir}/vendor"/>
<delete file="composer.lock" />
</target>
<target name="update-dependencies" description="Updates Composer dependencies">
<exec command="php composer.phar update --dev" passthru="true" />
</target>
<target name="coverage">
<if>
<isset property="service" />
<then>
<property name="testpath" value="tests/Aws/Tests/${service}" />
</then>
<else>
<property name="testpath" value="" />
</else>
</if>
<mkdir dir="${dir.output}/logs" />
<exec passthru="true" command="phpunit --coverage-html=${dir.output}/coverage --coverage-clover=${dir.output}/logs/clover.xml ${testpath}" />
</target>
<target name="view-coverage">
<exec passthru="true" command="open ${dir.output}/coverage/index.html" />
</target>
<target name="phpdepend">
<delete dir="${dir.output}/pdepend" includeemptydirs="true" verbose="true" failonerror="true" />
<mkdir dir="${dir.output}/pdepend" />
<phpdepend>
<fileset refid="src_files" />
<analyzer type="coderank-mode" value="method"/>
<logger type="jdepend-chart" outfile="${dir.output}/pdepend/jdepend-chart.svg" />
<logger type="overview-pyramid" outfile="${dir.output}/pdepend/overview-pyramid.svg" />
<logger type="jdepend-chart" outfile="${dir.output}/pdepend/jdepend-chart.png" />
<logger type="overview-pyramid" outfile="${dir.output}/pdepend/overview-pyramid.png" />
<logger type="jdepend-xml" outfile="${dir.output}/pdepend/jdepend.xml" />
<logger type="phpunit-xml" outfile="${dir.output}/pdepend/phpunit.xml" />
<logger type="summary-xml" outfile="${dir.output}/pdepend/summary.xml" />
</phpdepend>
</target>
<target name="phpcs">
<delete dir="${dir.output}/phpcs" includeemptydirs="true" verbose="true" failonerror="true" />
<mkdir dir="${dir.output}/phpcs" />
<!-- <phpcodesniffer></phpcodesniffer> -->
</target>
<target name="phpmd">
<delete dir="${dir.output}/phpmd" includeemptydirs="true" verbose="true" failonerror="true" />
<mkdir dir="${dir.output}/phpmd" />
<phpmd>
<fileset refid="src_files" />
<formatter type="html" outfile="${dir.output}/phpmd/phpmd.html"/>
<formatter type="xml" outfile="${dir.output}/phpmd/phpmd.xml"/>
</phpmd>
</target>
<target name="phpcpd">
<delete dir="${dir.output}/phpcpd" includeemptydirs="true" verbose="true" failonerror="true" />
<mkdir dir="${dir.output}/phpcpd" />
<phpcpd>
<fileset refid="src_files" />
<formatter type="pmd" outfile="${dir.output}/phpcpd/pmd.xml" />
<formatter type="default" outfile="${dir.output}/phpcpd/default.xml" />
</phpcpd>
</target>
<target name="phploc">
<exec command="phploc --log-csv ${dir.output}/logs/phploc.csv ." dir="${project.basedir}/src" passthru="true" />
</target>
<target name="phplint">
<phplint>
<fileset refid="src_files" />
</phplint>
</target>
<target name="phpcb" description="Aggregate tool output with PHP_CodeBrowser">
<exec executable="phpcb">
<arg value="--log" />
<arg path="${dir.output}/logs" />
<arg value="--source" />
<arg path="${project.basedir}/src" />
<arg value="--output" />
<arg path="${dir.output}/code-browser" />
</exec>
</target>
<target name="install-build-deps">
<exec command="pear install --alldeps pear.phpunit.de/PHPUnit" passthru="true" />
<exec command="pear install --alldeps phpunit/PHP_CodeBrowser" passthru="true" />
<exec command="pear install --alldeps phpunit/phploc" passthru="true" />
<exec command="pear install --alldeps pear.pdepend.org/PHP_Depend-beta" passthru="true" />
<exec command="pear install --alldeps pear.phpmd.org/PHP_PMD" passthru="true" />
<exec command="pear install --alldeps pear.phpunit.de/phpcpd" passthru="true" />
<exec command="pear install --alldeps PHP_CodeSniffer" passthru="true" />
<exec command="pear install --alldeps pear.phing.info/phing" passthru="true" />
</target>
<target name="all" depends="clean, prepare, test-init, build, report"/>
<target name="build" depends="phplint, prepare, test-init, test, phar"/>
<target name="report" depends="coverage, phploc, phpcs, phpmd, phpcpd, phpdepend, phpcb"/>
</project>

35
vendor/aws/aws-sdk-php/build/aws-autoloader.php vendored Normal file
View File

@ -0,0 +1,35 @@
<?php
/**
* Copyright 2010-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
require_once __DIR__ . '/Symfony/Component/ClassLoader/UniversalClassLoader.php';
if (!defined('AWS_FILE_PREFIX')) {
define('AWS_FILE_PREFIX', __DIR__);
}
$classLoader = new Symfony\Component\ClassLoader\UniversalClassLoader();
$classLoader->registerNamespaces(array(
'Aws' => AWS_FILE_PREFIX,
'Guzzle' => AWS_FILE_PREFIX,
'Symfony' => AWS_FILE_PREFIX,
'Doctrine' => AWS_FILE_PREFIX,
'Psr' => AWS_FILE_PREFIX,
'Monolog' => AWS_FILE_PREFIX
));
$classLoader->register();
return $classLoader;

24
vendor/aws/aws-sdk-php/build/phar-stub.php vendored Normal file
View File

@ -0,0 +1,24 @@
<?php
/**
* Copyright 2010-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
Phar::mapPhar('aws.phar');
define('AWS_PHAR', true);
define('AWS_FILE_PREFIX', 'phar://aws.phar');
return (require 'phar://aws.phar/aws-autoloader.php');
__HALT_COMPILER();

View File

@ -0,0 +1,197 @@
<?php
/**
* Copyright 2010-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
// Run this script from the command line to see if your system is able to run the AWS SDK for PHP
class CompatibilityTest
{
protected $isCli;
protected $lines = array();
public function __construct()
{
$this->isCli = php_sapi_name() == 'cli';
$title = 'AWS SDK for PHP Compatibility Test';
if ($this->isCli) {
$rep = str_repeat('=', strlen($title));
$this->lines[] = "{$rep}\n{$title}\n{$rep}";
} else {
$this->lines[] = sprintf(
'<style type="text/css">%s %s</style>',
'html {font-family:verdana;} .OK {color: #166116;}',
'.FAIL {margin-top: 1em; color: #A52C27;} .WARNING {margin-top: 1em; color:#6B036B;}'
);
$this->lines[] = "<h1>{$title}</h1>";
}
}
public function endTest()
{
$text = implode("\n", $this->lines);
echo $this->isCli ? $text : "<html><body>{$text}</body></html>";
}
public function title($text)
{
$this->lines[] = $this->isCli
? "\n" . $text . "\n" . str_repeat('-', strlen($text)) . "\n"
: "<h2>{$text}</h2>";
}
public function write($text)
{
$this->lines[] = $text;
}
public function quote($text)
{
return !$this->isCli
? "<pre>{$text}</pre>"
: implode("\n", array_map(function ($t) { return ' ' . $t; }, explode("\n", $text)));
}
public function check($info, $func, $text, $required)
{
$level = $func() ? 'OK' : ($required ? 'FAIL' : 'WARNING');
if ($this->isCli) {
$text = $level == 'OK' ? "- {$info}: [OK]" : "- {$info}: [{$level}]\n - {$text}";
} else {
$text = $level == 'OK'
? "<span class=\"{$level}\">{$info}</span><br />"
: "<div class=\"{$level}\">{$info}: [{$level}]<br /><blockquote>{$text}</blockquote></div>";
}
$this->write($text);
}
public function addRecommend($info, $func, $text)
{
$this->check($info, $func, $text, false);
}
public function addRequire($info, $func, $text)
{
$this->check($info, $func, $text, true);
}
public function iniCheck($info, $setting, $expected, $required = true, $help = null)
{
$current = ini_get($setting);
$cb = function () use ($current, $expected) {
return is_callable($expected)
? call_user_func($expected, $current)
: $current == $expected;
};
$message = sprintf(
'%s in %s is currently set to %s but %s be set to %s.',
$setting,
php_ini_loaded_file(),
var_export($current, true),
$required ? 'must' : 'should',
var_export($expected, true)
) . ' ' . $help;
$this->check($info, $cb, trim($message), $required);
}
public function extCheck($ext, $required = true, $help = '')
{
$info = sprintf('Checking if the %s extension is installed', $ext);
$cb = function () use ($ext) { return extension_loaded($ext); };
$message = $help ?: sprintf('The %s extension %s be installed', $ext, $required ? 'must' : 'should');
$this->check($info, $cb, $message, $required);
}
}
$c = new CompatibilityTest();
$c->title('System requirements');
$c->addRequire(
'Ensuring that the version of PHP is >= 5.3.3',
function () { return version_compare(phpversion(), '5.3.3', '>='); },
'You must update your version of PHP to 5.3.3 or newer to run the AWS SDK for PHP'
);
$c->iniCheck('Ensuring that detect_unicode is disabled', 'detect_unicode', false, true, 'Enabling detect_unicode may cause errors when using phar files. See https://bugs.php.net/bug.php?id=42396');
$c->iniCheck('Ensuring that session.auto_start is disabled', 'session.auto_start', false);
if (extension_loaded('suhosin')) {
$c->addRequire(
'Ensuring that phar files can be run with the suhosin patch',
function () {
return false !== stripos(ini_get('suhosin.executor.include.whitelist'), 'phar');
},
sprintf('suhosin.executor.include.whitelist must be configured to include "phar" in %s so that the phar file works correctly', php_ini_loaded_file())
);
}
foreach (array('pcre', 'spl', 'json', 'dom', 'simplexml', 'curl') as $ext) {
$c->extCheck($ext, true);
}
if (function_exists('curl_version')) {
$c->addRequire('Ensuring that cURL can send https requests', function () {
$version = curl_version();
return in_array('https', $version['protocols'], true);
}, 'cURL must be able to send https requests');
}
$c->addRequire('Ensuring that file_get_contents works', function () {
return function_exists('file_get_contents');
}, 'file_get_contents has been disabled');
$c->title('System recommendations');
$c->addRecommend(
'Checking if PHP version is >= 5.4.1',
function () { return version_compare(phpversion(), '5.4.1', '>='); },
'You are using an older version of PHP (' . phpversion() . '). Consider updating to PHP 5.4.1 or newer to improve the performance and stability of the SDK.'
);
$c->addRecommend('Checking if you are running on a 64-bit platform', function () {
return PHP_INT_MAX === 9223372036854775807;
}, 'You are not running on a 64-bit installation of PHP. You may run into issues uploading or downloading files larger than 2GB.');
$c->iniCheck('Ensuring that zend.enable_gc is enabled', 'zend.enable_gc', true, false);
$c->check('Ensuring that date.timezone is set', function () {
return (bool) ini_get('date.timezone');
}, 'The date.timezone PHP ini setting has not been set in ' . php_ini_loaded_file(), false);
if (extension_loaded('xdebug')) {
$c->addRecommend('Checking if Xdebug is installed', function () { return false; }, 'Xdebug is installed. Consider uninstalling Xdebug to make the SDK run much faster.');
$c->iniCheck('Ensuring that Xdebug\'s infinite recursion detection does not erroneously cause a fatal error', 'xdebug.max_nesting_level', 0, false);
}
$c->extCheck('openssl', false);
$c->extCheck('zlib', false);
$c->extCheck('uri_template', false, 'Installing the uri_template extension will make the SDK faster. Install using pecl install uri_template-alpha');
// Is an opcode cache installed or are they running >= PHP 5.5?
$c->addRecommend(
'Checking if an opcode cache is installed',
function () {
return version_compare(phpversion(), '5.5.0', '>=') || extension_loaded('apc') || extension_loaded('xcache');
},
'You are not utilizing an opcode cache. Consider upgrading to PHP >= 5.5 or installing APC.'
);
$c->title('PHP information');
ob_start();
phpinfo(INFO_GENERAL);
$info = ob_get_clean();
$c->write($c->quote($info));
$c->endTest();

43
vendor/aws/aws-sdk-php/composer.json vendored Executable file
View File

@ -0,0 +1,43 @@
{
"name": "aws/aws-sdk-php",
"homepage": "http://aws.amazon.com/sdkforphp2",
"description":"AWS SDK for PHP",
"keywords":["aws","amazon","sdk","s3","ec2","dynamodb"],
"type":"library",
"license":"Apache-2.0",
"authors":[
{
"name":"Amazon Web Services",
"homepage":"http://aws.amazon.com"
}
],
"require": {
"php": ">=5.3.3",
"guzzle/guzzle": "~3.7.0"
},
"suggest": {
"doctrine/cache": "Adds support for caching of credentials and responses",
"ext-apc": "Allows service description opcode caching, request and response caching, and credentials caching",
"ext-openssl": "Allows working with CloudFront private distributions and verifying received SNS messages",
"monolog/monolog": "Adds support for logging HTTP requests and responses",
"symfony/yaml": "Eases the ability to write manifests for creating jobs in AWS Import/Export"
},
"require-dev": {
"doctrine/cache": "~1.0",
"ext-openssl": "*",
"monolog/monolog": "1.4.*",
"phpunit/phpunit": "3.7.*",
"symfony/class-loader": "2.*",
"symfony/yaml": "2.*"
},
"autoload": {
"psr-0": {
"Aws": "src/"
}
},
"extra": {
"branch-alias": {
"dev-master": "2.4.x-dev"
}
}
}

160
vendor/aws/aws-sdk-php/docs/Makefile vendored Normal file
View File

@ -0,0 +1,160 @@
# Makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
PAPER =
BUILDDIR = _build
TRACKING =
# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
help:
@echo "Please use \`make <target>' where <target> is one of"
@echo " html to make standalone HTML files"
@echo " pdf to make PDF files"
@echo " dirhtml to make HTML files named index.html in directories"
@echo " singlehtml to make a single large HTML file"
@echo " pickle to make pickle files"
@echo " json to make JSON files"
@echo " htmlhelp to make HTML files and a HTML help project"
@echo " qthelp to make HTML files and a qthelp project"
@echo " devhelp to make HTML files and a Devhelp project"
@echo " epub to make an epub"
@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
@echo " latexpdf to make LaTeX files and run them through pdflatex"
@echo " text to make text files"
@echo " man to make manual pages"
@echo " texinfo to make Texinfo files"
@echo " info to make Texinfo files and run them through makeinfo"
@echo " gettext to make PO message catalogs"
@echo " changes to make an overview of all changed/added/deprecated items"
@echo " linkcheck to check all external links for integrity"
@echo " doctest to run all doctests embedded in the documentation (if enabled)"
clean:
-rm -rf $(BUILDDIR)/*
html:
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
pdf:
$(SPHINXBUILD) -b pdf $(ALLSPHINXOPTS) $(BUILDDIR)/pdf
@echo
@echo "Build finished. The PDF file is in $(BUILDDIR)/pdf."
dirhtml:
$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
singlehtml:
$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
@echo
@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
pickle:
$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
@echo
@echo "Build finished; now you can process the pickle files."
json:
$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
@echo
@echo "Build finished; now you can process the JSON files."
htmlhelp:
$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
@echo
@echo "Build finished; now you can run HTML Help Workshop with the" \
".hhp project file in $(BUILDDIR)/htmlhelp."
qthelp:
$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
@echo
@echo "Build finished; now you can run "qcollectiongenerator" with the" \
".qhcp project file in $(BUILDDIR)/qthelp, like this:"
@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/AWSSDKforPHP.qhcp"
@echo "To view the help file:"
@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/AWSSDKforPHP.qhc"
devhelp:
$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
@echo
@echo "Build finished."
@echo "To view the help file:"
@echo "# mkdir -p $$HOME/.local/share/devhelp/AWSSDKforPHP"
@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/AWSSDKforPHP"
@echo "# devhelp"
epub:
$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
@echo
@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
latex:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo
@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
@echo "Run \`make' in that directory to run these through (pdf)latex" \
"(use \`make latexpdf' here to do that automatically)."
latexpdf:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo "Running LaTeX files through pdflatex..."
$(MAKE) -C $(BUILDDIR)/latex all-pdf
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
text:
$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
@echo
@echo "Build finished. The text files are in $(BUILDDIR)/text."
man:
$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
@echo
@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
texinfo:
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
@echo
@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
@echo "Run \`make' in that directory to run these through makeinfo" \
"(use \`make info' here to do that automatically)."
info:
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
@echo "Running Texinfo files through makeinfo..."
make -C $(BUILDDIR)/texinfo info
@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
gettext:
$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
@echo
@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
changes:
$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
@echo
@echo "The overview file is in $(BUILDDIR)/changes."
linkcheck:
$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
@echo
@echo "Link check complete; look for any errors in the above output " \
"or in $(BUILDDIR)/linkcheck/output.txt."
doctest:
$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
@echo "Testing of doctests in the sources finished, look at the " \
"results in $(BUILDDIR)/doctest/output.txt."

13
vendor/aws/aws-sdk-php/docs/README.md vendored Normal file
View File

@ -0,0 +1,13 @@
AWS SDK for PHP
===============
Documentation for the [AWS SDK for PHP](https://github.com/aws/aws-sdk-php).
Building the documentation
--------------------------
The documentation is written in [reStructuredText](http://docutils.sourceforge.net/rst.html) and can be built using
[Sphinx](http://sphinx.pocoo.org/).
1. Install the requirements: ``pip install -r requirements.txt``
2. Make the HTML documentation: ``make html``

305
vendor/aws/aws-sdk-php/docs/_ext/aws.py vendored Normal file
View File

@ -0,0 +1,305 @@
import os, re, subprocess, json, collections
from sphinx.addnodes import toctree
from docutils import io, nodes, statemachine, utils
from docutils.parsers.rst import Directive
from jinja2 import Environment, PackageLoader
# Maintain a cache of previously loaded examples
example_cache = {}
# Maintain a cache of previously loaded service descriptions
description_cache = {}
def setup(app):
"""
see: http://sphinx.pocoo.org/ext/appapi.html
this is the primary extension point for Sphinx
"""
from sphinx.application import Sphinx
if not isinstance(app, Sphinx): return
app.add_role('regions', regions_role)
app.add_directive('service', ServiceIntro)
app.add_directive('example', ExampleDirective)
def regions_role(name, rawtext, text, lineno, inliner, options={}, content={}):
"""Inserts a list of regions available to a service name
Returns 2 part tuple containing list of nodes to insert into the
document and a list of system messages. Both are allowed to be
empty.
:param name: The role name used in the document.
:param rawtext: The entire markup snippet, with role.
:param text: The text marked with the role.
:param lineno: The line number where rawtext appears in the input.
:param inliner: The inliner instance that called us.
:param options: Directive options for customization.
:param content: The directive content for customization.
"""
try:
service_name = str(text)
if not service_name:
raise ValueError
app = inliner.document.settings.env.app
node = make_regions_node(rawtext, app, str(service_name), options)
return [node], []
except ValueError:
msg = inliner.reporter.error(
'The service name "%s" is invalid; ' % text, line=lineno)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
def get_regions(service_name):
"""Get the regions for a service by name
Returns a list of regions
:param service_name: Retrieve regions for this service by name
"""
return load_service_description(service_name)['regions'].keys()
def make_regions_node(rawtext, app, service_name, options):
"""Create a list of regions for a service name
:param rawtext: Text being replaced with the list node.
:param app: Sphinx application context
:param service_name: Service name
:param options: Options dictionary passed to role func.
"""
regions = get_regions(service_name)
return nodes.Text(", ".join(regions))
class ServiceDescription():
"""
Loads the service description for a given source file
"""
def __init__(self, service):
self.service_name = service
self.description = self.load_description(self.determine_filename())
def determine_filename(self):
"""Determines the filename to load for a service"""
# Determine the path to aws-config.php
path = os.path.abspath("../src/Aws/Common/Resources/aws-config.php")
self.config = self.__load_php(path)
# Iterate over the loaded dictionary and see if a matching service exists
for key in self.config["services"]:
alias = self.config["services"][key].get("alias", "")
if key == self.service_name or alias == self.service_name:
break
else:
raise ValueError("No service matches %s" % (self.service_name))
# Determine the name of the client class to load
class_path = self.config["services"][key]["class"].replace("\\", "/")
client_path = os.path.abspath("../src/" + class_path + ".php")
contents = open(client_path, 'r').read()
# Determine the current version of the client (look at the LATEST_API_VERSION constant value)
version = re.search("LATEST_API_VERSION = '(.+)'", contents).groups(0)[0]
# Determine the name of the service description used by the client
matches = re.search("__DIR__ \. '/Resources/(.+)\.php'", contents)
description = matches.groups(0)[0] % (version)
# Strip the filename of the client and determine the description path
service_path = "/".join(client_path.split(os.sep)[0:-1])
service_path += "/Resources/" + description + ".php"
return service_path
def load_description(self, path):
"""Determines the filename to load for a service
:param path: Path to a service description to load
"""
return self.__load_php(path)
def __load_php(self, path):
"""Load a PHP script that returns an array using JSON
:param path: Path to the script to load
"""
path = os.path.abspath(path)
# Make command to each environment Linux/Mac and Windows
if os.name == 'nt':
sh = 'php -r \"$c = include \'' + path + '\'; echo json_encode($c);\"'
else:
sh = 'php -r \'$c = include "' + path + '"; echo json_encode($c);\''
loaded = subprocess.check_output(sh, shell=True)
return json.loads(loaded)
def __getitem__(self, i):
"""Allows access to the service description items via the class"""
return self.description.get(i)
def load_service_description(name):
if name not in description_cache:
description_cache[name] = ServiceDescription(name)
return description_cache[name]
class ServiceIntro(Directive):
"""
Creates a service introduction to inject into a document
"""
required_arguments = 1
optional_arguments = 1
final_argument_whitespace = True
def run(self):
if len(self.arguments) == 2:
api_version = self.arguments[1].strip()
else:
api_version = ""
service_name = self.arguments[0].strip()
d = load_service_description(service_name)
rawtext = self.generate_rst(d, api_version)
tab_width = 4
include_lines = statemachine.string2lines(
rawtext, tab_width, convert_whitespace=1)
self.state_machine.insert_input(
include_lines, os.path.abspath(__file__))
return []
def get_doc_link(self, name, namespace):
"""Determine the documentation link for an endpoint"""
if name == "sts":
return "http://aws.amazon.com/documentation/iam/"
else:
return "http://aws.amazon.com/documentation/" + namespace.lower()
def get_locator_name(self, name):
"""Determine the service locator name for an endpoint"""
return name
def generate_rst(self, d, api_version):
rawtext = ""
scalar = {}
# Sort the operations by key
operations = collections.OrderedDict(sorted(d.description['operations'].items()))
# Grab all of the simple strings from the description
for key in d.description:
if isinstance(d[key], str) or isinstance(d[key], unicode):
scalar[key] = d[key]
# Add substitutions for top-level data in a service description
rawtext += ".. |%s| replace:: %s\n\n" % (key, scalar[key])
# Add magic methods to each operation
for key in operations:
operations[key]['magicMethod'] = key[0].lower() + key[1:]
# Set the ordered dict of operations on the description
d.description['operations'] = operations
# Determine the service locator name and doc URL
locator_name = self.get_locator_name(d["namespace"])
docs = self.get_doc_link(locator_name, d["namespace"])
# Determine the "namespace" used for linking to API docs
if api_version:
apiVersionSuffix = "_" + api_version.replace("-", "_")
else:
apiVersionSuffix = ""
env = Environment(loader=PackageLoader('aws', 'templates'))
template = env.get_template("client_intro")
rawtext += template.render(
scalar,
description=d.description,
regions=get_regions(d["namespace"]),
locator_name=locator_name,
doc_url=docs,
specifiedApiVersion=api_version,
apiVersionSuffix=apiVersionSuffix)
return rawtext
class ExampleDirective(Directive):
"""
Inserts a formatted PHPUnit example into the source
"""
# Directive configuration
required_arguments = 2
optional_arguments = 0
final_argument_whitespace = True
def run(self):
self.end_function = " }\n"
self.begin_tag = " // @begin\n"
self.end_tag = " // @end\n"
example_file = self.arguments[0].strip()
example_name = self.arguments[1].strip()
if not example_name:
raise ValueError("Must specify both an example file and example name")
contents = self.load_example(example_file, example_name)
rawtext = self.generate_rst(contents)
tab_width = 4
include_lines = statemachine.string2lines(
rawtext, tab_width, convert_whitespace=1)
self.state_machine.insert_input(
include_lines, os.path.abspath(__file__))
return []
def load_example(self, example_file, example_name):
"""Loads the contents of an example and strips out non-example parts"""
key = example_file + '.' + example_name
# Check if this example is cached already
if key in example_cache:
return example_cache[key]
# Not cached, so index the example file functions
path = os.path.abspath(__file__ + "/../../../../tests/Aws/Tests/" + example_file)
f = open(path, 'r')
in_example = False
capturing = False
buffer = ""
# Scan each line of the file and create example hashes
for line in f:
if in_example:
if line == self.end_function:
if in_example:
example_cache[in_example] = buffer
buffer = ""
in_example = False
elif line == self.begin_tag:
# Look for the opening // @begin tag to begin capturing
buffer = ""
capturing = True
elif line == self.end_tag:
# Look for the optional closing tag to stop capturing
capturing = False
elif capturing:
buffer += line
elif "public function test" in line:
# Grab the function name from the line and keep track of the
# name of the current example being captured
current_name = re.search('function (.+)\s*\(', line).group(1)
in_example = example_file + "." + current_name
f.close()
return example_cache[key]
def generate_rst(self, contents):
rawtext = ".. code-block:: php\n\n" + contents
return rawtext

View File

@ -0,0 +1,90 @@
====================================================================================
{{serviceFullName}}{% if specifiedApiVersion %} ({{specifiedApiVersion}}){% endif %}
====================================================================================
This guide focuses on the AWS SDK for PHP client for `{{ serviceFullName }} <{{ doc_url }}>`_. This guide assumes that
you have already downloaded and installed the AWS SDK for PHP. See :doc:`installation` for more information on
getting started.
{% if specifiedApiVersion %}
**Note:** This guide is for the **{{ specifiedApiVersion }}** API version of {{ serviceFullName }}. You may also be
interested in the :doc:`guide for the latest API version of {{ serviceFullName }} <service-{{ namespace|lower }}>`.
{% endif %}
.. _{{ namespace }}{{ apiVersionSuffix }}_operations:
Available operations
--------------------
Please see the `{{ serviceFullName }} Client API reference <http://docs.aws.amazon.com/aws-sdk-php/latest/class-Aws.{{ namespace }}{{ apiVersionSuffix }}.{{ namespace }}Client{{ apiVersionSuffix }}.html>`_
for details about all of the available methods, including descriptions of the inputs and outputs.
{# Here we are creating a list-table. The contents of a list-table looks like:
* - Foo
- Bar
* - Baz
- Bam
We must also ensure that the same number of columns are available for each table row.
#}
.. list-table::
:header-rows: 0
:stub-columns: 0
:class: two-column
{% for key, op in description.operations.iteritems() %}
{% if loop.index is odd %}* {% else %} {% endif %}- `{{ key }} <http://docs.aws.amazon.com/aws-sdk-php/latest/class-Aws.{{ namespace}}{{ apiVersionSuffix }}.{{ namespace }}Client{{ apiVersionSuffix }}.html#_{{ op.magicMethod }}>`_
{%- if op.documentationUrl %} (`service docs <{{ op.documentationUrl}}>`_){%- endif %}
{%- if loop.last and loop.index is odd %}
-
{%- endif %}
{% endfor %}
Creating a client
-----------------
First you need to create a client object using one of the following techniques.
Factory method
~~~~~~~~~~~~~~
The easiest way to get up and running quickly is to use the ``Aws\{{namespace}}\{{namespace}}Client::factory()`` method
and provide your credentials (``key`` and ``secret``).
{% if not globalEndpoint -%}
A ``region`` parameter is also required and must be set to one of the following values: ``{{ regions|join("``, ``") }}``
{% endif %}
.. code-block:: php
use Aws\{{namespace}}\{{namespace}}Client;
$client = {{namespace}}Client::factory(array(
'key' {% if specifiedApiVersion %} {% endif %}=> '<aws access key>',
'secret' {% if specifiedApiVersion %} {% endif %}=> '<aws secret key>'{% if not globalEndpoint -%},
'region' {% if specifiedApiVersion %} {% endif %}=> '<region name>'{% endif %}{% if specifiedApiVersion -%},
'version' => '{{specifiedApiVersion}}'{% endif %}
));
You can provide your access keys like in the preceding example, or you can choose to omit them if you are using `AWS
Identity and Access Management (IAM) roles for EC2 instances
<http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UsingIAM.html#UsingIAMrolesWithAmazonEC2Instances>`_ or credentials
sourced from the ``AWS_ACCESS_KEY_ID`` and ``AWS_SECRET_ACCESS_KEY`` environment variables.
Service locator
~~~~~~~~~~~~~~~
A more robust way to connect to {{ serviceFullName }} is through the service locator. This allows you to specify
credentials and other configuration settings in a configuration file. These settings can then be shared across all
clients so that you only have to specify your settings once.
.. code-block:: php
use Aws\Common\Aws;
// Create a service builder using a configuration file
$aws = Aws::factory('/path/to/my_config.json');
// Get the client from the builder by namespace
$client = $aws->get('{{ namespace }}');

View File

@ -0,0 +1,10 @@
------------------------------
.. admonition:: This guide is incomplete
This guide is not quite finished. If you are looking for a good way to contribute to the SDK and to the rest of
the AWS PHP community, then helping to write documentation is a great place to start. Our guides are written
in `ReStructuredText <http://docutils.sourceforge.net/rst.html>`_ and generated using
`Sphinx <http://sphinx-doc.org/>`_. Feel free to add some content to our documentation and send a pull request
to https://github.com/aws/aws-sdk-php. You can view our documentation sources at
https://github.com/aws/aws-sdk-php/tree/master/docs.

View File

@ -0,0 +1,20 @@
Some AWS operations return truncated results that require subsequent requests in order to retrieve the entire result
set. The subsequent requests typically require pagination tokens or markers from the previous request in order to
retrieve the next set of results. Working with these tokens can be cumbersome, since you must manually keep track of
them, and the API for each service may differ in how it uses them.
The AWS SDK for PHP has a feature called **iterators** that allows you to retrieve an *entire* result set without
manually handling pagination tokens or markers. The iterators in the SDK implement PHP's ``Iterator`` interface, which
allows you to easily enumerate or iterate through resources from a result set with ``foreach``.
You can find a list of the iterators supported by a client by viewing the docblock of a client. Any ``@method`` tag that
has a name that looks like "``get[…]Iterator``" will return an iterator. For example, the following code uses the
``getListObjectsIterator()`` method of the S3 client object to create an iterator for objects in a bucket.
.. code-block:: php
$iterator = $client->getListObjectsIterator(array('Bucket' => 'my-bucket'));
foreach ($iterator as $object) {
echo $object['Key'] . "\n";
}

View File

@ -0,0 +1,33 @@
The result of a command is a `Model <http://docs.aws.amazon.com/aws-sdk-php/latest/class-Guzzle.Service.Resource.Model.html>`_
(``Guzzle\Service\Resource\Model``) object. This object contains the data from a response body and can be used like an
array (e.g., ``$result['TableName']``). It also has convenience methods like ``get()``, ``getPath()``, and
``toArray()``. The contents of the response model depend on the command that was executed and are documented in the API
docs for each operation (e.g., see the *Returns* section in the API docs for the `S3 GetObject operation
<http://docs.aws.amazon.com/aws-sdk-php/latest/class-Aws.S3.S3Client.html#_getObject>`_).
.. code-block:: php
// Use an instance of S3Client to get an object
$result = $client->getObject(array(
'Bucket' => 'my-bucket',
'Key' => 'test.txt'
));
// Introspect the keys
var_export($result->getKeys());
//> array( 'Body', 'ContentLength', 'DeleteMarker', 'Expiration', ... )
// Get a value
echo $result['ContentLength'];
// OR
echo $result->get('ContentLength');
//> 6
// Get a nested value
echo $result->getPath('Metadata/CustomValue');
//> Testing123
// Get an array of the data
var_export($result->toArray());
//> array ( 'Body' => 'Hello!' , 'ContentLength' => 6, ... )

View File

@ -0,0 +1,10 @@
One of the higher-level abstractions provided by the SDK is **waiters**. Waiters make it easier to work with
*eventually consistent* systems by providing an easy way to wait until a resource enters into a particular state by
polling the resource. You can find a list of the waiters supported by a client by viewing the docblock of a client. Any
``@method`` tag that has a name starting with "``waitUntil``" will utilize a waiter.
.. code-block:: php
$client->waitUntilBucketExists(array('Bucket' => 'my-bucket'));
The preceding method invocation will instantiate a waiter object and poll the bucket until it exists.

Binary file not shown (3.9 KiB image).

View File

@ -0,0 +1,5 @@
<div class="left-bar-other">
<h3>Feedback</h3>
<p class="feedback">Did you find this page useful? Do you have a suggestion? <a href="https://portal.aws.amazon.com/gp/aws/html-forms-controller/documentation/aws_doc_feedback_04?service_name=AWS%20SDK%20for%20PHP&guide_name=Guide&api_version={{ version }}&file_name={{ pagename }}">Give us feedback</a> or
send us a <a href="https://github.com/aws/aws-sdk-php">pull request</a> on GitHub.</p>
</div>

View File

@ -0,0 +1,28 @@
{%- extends "!layout.html" %}
{%- block nav_links %}
<li><a href="{{ pathto(master_doc) }}">Docs</a></li>
<li><a href="http://docs.aws.amazon.com/aws-sdk-php/latest/">API Docs</a></li>
<li><a href="https://forums.aws.amazon.com/forum.jspa?forumID=80">Forum</a></li>
<li><a href="https://github.com/aws/aws-sdk-php/issues">Issues</a></li>
<li><a href="https://packagist.org/packages/aws/aws-sdk-php">Packagist</a></li>
<li><a href="http://aws.amazon.com/sdkforphp">Homepage</a></li>
<li><a href="/aws-sdk-php/guide/latest/aws-sdk-php-guide.pdf">PDF</a></li>
{%- endblock %}
{% block ga %}
{% if theme_google_analytics_account %}
<script type="text/javascript" src="https://d36cz9buwru1tt.cloudfront.net/amznUrchin.js"></script>
<!-- SiteCatalyst code version: H.25.2.
Copyright 1996-2012 Adobe, Inc. All Rights Reserved
More info available at http://www.omniture.com -->
<script type="text/javascript" src="https://d36cz9buwru1tt.cloudfront.net/js/sitecatalyst/s_code.min.js"></script>
<script type="text/javascript"><!--
s.prop66='AWS SDK for PHP'; s.eVar66='D=c66';
s.prop65='User Guide'; s.eVar65='D=c65';
var s_code=s.t();if(s_code)document.write(s_code);
//--></script>
<script type="text/javascript"><!--if(navigator.appVersion.indexOf('MSIE')>=0)document.write(unescape('%3C')+'\!-'+'-')//--></script>
<noscript><img src="http://amazonwebservices.d2.sc.omtrdc.net/b/ss/awsamazondev/1/H.25.2--NS/0" height="1" width="1" border="0" alt=""></noscript><!--/DO NOT REMOVE/--><!-- End SiteCatalyst code version: H.25.2. -->
{% endif %}
{% endblock %}

View File

@ -0,0 +1,5 @@
<p class="logo left-bar-other">
<a href="{{ pathto(master_doc) }}">
<img class="logo" src="{{ pathto('_static/logo.png', 1) }}" alt="Logo" height="63" />
</a>
</p>

View File

@ -0,0 +1,60 @@
==================
Signing Up for AWS
==================
Creating an AWS account
-----------------------
Before you begin, you need to create an account. When you sign up for AWS, AWS signs your account up for all services.
You are charged only for the services you use.
To sign up for AWS
~~~~~~~~~~~~~~~~~~
#. Go to http://aws.amazon.com and click **Sign Up Now**.
#. Follow the on-screen instructions.
AWS sends you a confirmation email after the sign-up process is complete. At any time, you can view your current account
activity and manage your account at http://aws.amazon.com/account. From the **My Account** page, you can view current
charges and account activity and download usage reports.
To view your AWS credentials
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#. Go to http://aws.amazon.com/.
#. Click **My Account/Console**, and then click **Security Credentials**.
#. Under **Your Account**, click **Security Credentials**.
#. In the spaces provided, type your user name and password, and then click **Sign in using our secure server**.
#. Under **Access Credentials**, on the **Access Keys** tab, your access key ID is displayed. To view your secret key,
under **Secret Access Key**, click **Show**.
Your secret key must remain a secret that is known only by you and AWS. Keep it confidential in order to protect your
account. Store it securely in a safe place, and never email it. Do not share it outside your organization, even if an
inquiry appears to come from AWS or Amazon.com. No one who legitimately represents Amazon will ever ask you for your
secret key.
Getting your AWS credentials
----------------------------
In order to use the AWS SDK for PHP, you need your AWS Access Key ID and Secret Access Key.
To get your AWS Access Key ID and Secret Access Key
- Go to http://aws.amazon.com/.
- Click **Account** and then click **Security Credentials**. The Security Credentials page displays (you might be
prompted to log in).
- Scroll down to Access Credentials and make sure the **Access Keys** tab is selected. The AWS Access Key ID appears in
the Access Key column.
- To view the Secret Access Key, click **Show**.
.. note::
**Important: Your Secret Access Key is a secret**, which only you and AWS should know. It is important to keep it confidential
to protect your account. Store it securely in a safe place. Never include it in your requests to AWS, and never
e-mail it to anyone. Do not share it outside your organization, even if an inquiry appears to come from AWS or
Amazon.com. No one who legitimately represents Amazon will ever ask you for your Secret Access Key.

270
vendor/aws/aws-sdk-php/docs/conf.py vendored Normal file
View File

@ -0,0 +1,270 @@
# -*- coding: utf-8 -*-
#
# AWS SDK for PHP documentation build configuration file, created by
# sphinx-quickstart on Mon Dec 10 19:00:11 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os, subprocess
# Don't require opening PHP tags in PHP examples
from sphinx.highlighting import lexers
from pygments.lexers.web import PhpLexer
lexers['php'] = PhpLexer(startinline=True, linenos=1)
lexers['php-annotations'] = PhpLexer(startinline=True, linenos=1)
primary_domain = 'php'
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add our custom extensions
sys.path.append(os.path.abspath('_ext/'))
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['aws', 'rst2pdf.pdfbuilder']
# index, rst2pdf, title, author
pdf_documents = [('index', u'aws-sdk-php-guide', u'AWS SDK for PHP', u'Amazon Web Services')]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'AWS SDK for PHP'
copyright = u'2013, Amazon Web Services'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = subprocess.check_output('git describe --abbrev=0 --tags', shell=True).strip()
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': ['sidebarlogo.html', 'localtoc.html', 'searchbox.html', 'feedback.html']
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'AWSSDKforPHPdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'AWSSDKforPHP.tex', u'AWS SDK for PHP Documentation',
u'Amazon Web Services', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'awssdkforphp', u'AWS SDK for PHP Documentation',
[u'Amazon Web Services'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'AWSSDKforPHP', u'AWS SDK for PHP Documentation',
u'Amazon Web Services', 'AWSSDKforPHP', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- HTML theme settings ------------------------------------------------
import guzzle_sphinx_theme
extensions.append("guzzle_sphinx_theme")
pygments_style = 'guzzle_sphinx_theme.GuzzleStyle'
html_translator_class = 'guzzle_sphinx_theme.HTMLTranslator'
html_theme_path = guzzle_sphinx_theme.html_theme_path()
html_theme = 'guzzle_sphinx_theme'
# Guzzle theme options (see theme.conf for more information)
html_theme_options = {
# hack to add tracking
"google_analytics_account": os.getenv('TRACKING', False),
"project_nav_name": "AWS SDK for PHP",
"github_user": "aws",
"github_repo": "aws-sdk-php",
"base_url": "http://docs.aws.amazon.com/aws-sdk-php/guide/latest/"
}

View File

@ -0,0 +1,244 @@
Configuration
=============
When passing an array of parameters to the first argument of ``Aws\Common\Aws::factory()``, the service builder loads
the default ``aws-config.php`` file and merges the array of shared parameters into the default configuration.
Excerpt from ``src/Aws/Common/Resources/aws-config.php``:
.. code-block:: php
<?php return array(
'services' => array(
'default_settings' => array(
'params' => array()
),
'dynamodb' => array(
'alias' => 'DynamoDb',
'extends' => 'default_settings',
'class' => 'Aws\DynamoDb\DynamoDbClient'
),
's3' => array(
'alias' => 'S3',
'extends' => 'default_settings',
'class' => 'Aws\S3\S3Client'
)
)
);
The ``aws-config.php`` file provides default configuration settings for associating client classes with service names.
This file tells the ``Aws\Common\Aws`` service builder which class to instantiate when you reference a client by name.
You can supply your credentials and other configuration settings to the service builder so that each client is
instantiated with those settings. To do this, pass an array of settings (including your ``key`` and ``secret``) into the
first argument of ``Aws\Common\Aws::factory()``.
Using a Custom Configuration File
---------------------------------
You can use a custom configuration file that allows you to create custom named clients with pre-configured settings.
Let's say you want to use the default ``aws-config.php`` settings, but you want to supply your keys using a
configuration file. Each service defined in the default configuration file extends from ``default_settings`` service.
You can create a custom configuration file that extends the default configuration file and add credentials to the
``default_settings`` service:
.. code-block:: php
<?php return array(
'includes' => array('_aws'),
'services' => array(
'default_settings' => array(
'params' => array(
'key' => 'your-aws-access-key-id',
'secret' => 'your-aws-secret-access-key',
'region' => 'us-west-2'
)
)
)
);
Make sure to include the ``'includes' => array('_aws'),`` line in your configuration file, because this extends the
default configuration that makes all of the service clients available to the service builder. If this is missing, then
you will get an exception when trying to retrieve a service client.
You can use your custom configuration file with the ``Aws\Common\Aws`` class by passing the full path to the
configuration file in the first argument of the ``factory()`` method:
.. code-block:: php
<?php
require 'vendor/autoload.php';
use Aws\Common\Aws;
$aws = Aws::factory('/path/to/custom/config.php');
You can create custom named services if you need to use multiple accounts with the same service:
.. code-block:: php
<?php return array(
'includes' => array('_aws'),
'services' => array(
'foo.dynamodb' => array(
'extends' => 'dynamodb',
'params' => array(
'key' => 'your-aws-access-key-id-for-foo',
'secret' => 'your-aws-secret-access-key-for-foo',
'region' => 'us-west-2'
)
),
'bar.dynamodb' => array(
'extends' => 'dynamodb',
'params' => array(
'key' => 'your-aws-access-key-id-for-bar',
'secret' => 'your-aws-secret-access-key-for-bar',
'region' => 'us-west-2'
)
)
)
);
If you prefer JSON syntax, you can define your configuration in JSON format instead of PHP.
.. code-block:: js
{
"includes": ["_aws"],
"services": {
"default_settings": {
"params": {
"key": "your-aws-access-key-id",
"secret": "your-aws-secret-access-key",
"region": "us-west-2"
}
}
}
}
What Happens If You Do Not Provide Credentials?
-----------------------------------------------
The SDK needs your AWS Access Key ID and Secret Access Key in order to make requests to AWS. However, you are not
required to provide your credentials at the time you instantiate the SDK or service client.
Using Environment Credentials
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If you do not provide credentials, the SDK will attempt to find credentials in your environment by checking in
``$_SERVER`` and using the ``getenv()`` function to look for the ``AWS_ACCESS_KEY_ID`` and ``AWS_SECRET_KEY``
environment variables.
If you are hosting your application on AWS Elastic Beanstalk, you can set the ``AWS_ACCESS_KEY_ID`` and
``AWS_SECRET_KEY`` environment variables through the AWS Elastic Beanstalk console so that the SDK can use those
credentials automatically.
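For example, assuming the variables are already set in your environment (the ``putenv()`` calls below are shown only for illustration, with placeholder values), you can create a client without passing any credentials; a minimal sketch:
.. code-block:: php
require 'vendor/autoload.php';
use Aws\S3\S3Client;
// Illustrative only: these variables would normally be exported by your
// shell or set through the Elastic Beanstalk console
putenv('AWS_ACCESS_KEY_ID=your-aws-access-key-id');
putenv('AWS_SECRET_KEY=your-aws-secret-access-key');
// No 'key' or 'secret' options are provided, so the SDK checks the environment
$s3 = S3Client::factory(array('region' => 'us-west-2'));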
Using Instance Profile Credentials
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If you do not provide credentials and there are no environment credentials available, the SDK will attempt to retrieve
`IAM Instance Profile credentials <http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UsingIAM.html#UsingIAMrolesWithAmazonEC2Instances>`_.
These credentials are only available on Amazon EC2 instances configured with an IAM role.
If absolutely no credentials are provided or found, you will receive an
``Aws\Common\Exception\InstanceProfileCredentialsException`` when you try to make a request.
Instance Profile Credentials are not supported by every service. `Please check if the service you are using supports
temporary credentials <http://docs.aws.amazon.com/STS/latest/UsingSTS/UsingTokens.html>`_.
Manually Setting Credentials
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
You can also manually set your credentials after the service client has been instantiated. To do this, use the
``setCredentials()`` method to set an entirely new ``Credentials`` object for the client.
.. code-block:: php
<?php
require 'vendor/autoload.php';
use Aws\S3\S3Client;
use Aws\Common\Credentials\Credentials;
$s3 = S3Client::factory();
$newCredentials = new Credentials('your-aws-access-key-id', 'your-aws-secret-access-key');
$s3->setCredentials($newCredentials);
Setting a region
----------------
Some clients require a ``region`` configuration setting. You can find out if the client you are using requires a region
and the regions available to a client by consulting the documentation for that particular client
(see :ref:`supported-services`).
Here's an example of creating an Amazon DynamoDB client that uses the ``us-west-1`` region:
.. code-block:: php
require 'vendor/autoload.php';
use Aws\DynamoDb\DynamoDbClient;
// Create a client that uses the us-west-1 region
$client = DynamoDbClient::factory(array(
'key' => 'abc',
'secret' => '123',
'region' => 'us-west-1'
));
Setting a custom endpoint
~~~~~~~~~~~~~~~~~~~~~~~~~
You can specify a completely customized endpoint for a client using the client's ``base_url`` option. If the client you
are using requires a region, then you must still specify the name of the region using the ``region`` option. Setting a
custom endpoint can be useful if you're using a mock web server that emulates a web service, you're testing against a
private beta endpoint, or you are trying to use a region not yet supported by the SDK.
Here's an example of creating an Amazon DynamoDB client that uses a completely customized endpoint:
.. code-block:: php
require 'vendor/autoload.php';
use Aws\DynamoDb\DynamoDbClient;
// Create a client that contacts a completely customized base URL
$client = DynamoDbClient::factory(array(
'base_url' => 'http://my-custom-url',
'region' => 'my-region-1',
'key' => 'abc',
'secret' => '123'
));
If your custom endpoint uses signature version 4 and must be signed with custom signature scoping values, then you can
specify the signature scoping values using ``signature.service`` (the scoped name of the service) and
``signature.region`` (the region that you are contacting). These values are typically not required.
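As a sketch (the endpoint and scoping values here are assumptions for illustration), these options are passed to the factory alongside ``base_url``:
.. code-block:: php
require 'vendor/autoload.php';
use Aws\DynamoDb\DynamoDbClient;
// Hypothetical private endpoint signed with custom signature scoping values
$client = DynamoDbClient::factory(array(
    'base_url' => 'http://my-custom-url',
    'region' => 'my-region-1',
    'signature.service' => 'dynamodb',   // the scoped name of the service
    'signature.region' => 'my-region-1', // the region that you are contacting
    'key' => 'abc',
    'secret' => '123'
));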
Using a proxy
~~~~~~~~~~~~~
You can send requests with the AWS SDK for PHP through a proxy using the "request options" of a client. These
"request options" are applied to each HTTP request sent from the client. One of the option settings that can be
specified is the ``proxy`` option.
Request options are passed to a client through the client's factory method:
.. code-block:: php
use Aws\S3\S3Client;
$s3 = S3Client::factory(array(
'request.options' => array(
'proxy' => '127.0.0.1:123'
)
));
The above example tells the client that all requests should be proxied through an HTTP proxy located at the
``127.0.0.1`` IP address using port ``123``.
You can supply a username and password when specifying your proxy setting if needed, using the format of
``username:password@host:port``.
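For example (the address and credentials below are placeholders):
.. code-block:: php
use Aws\S3\S3Client;
$s3 = S3Client::factory(array(
    'request.options' => array(
        // username:password@host:port
        'proxy' => 'user:pass@127.0.0.1:123'
    )
));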

187
vendor/aws/aws-sdk-php/docs/faq.rst vendored Normal file
View File

@ -0,0 +1,187 @@
================================
Frequently Asked Questions (FAQ)
================================
What methods are available on a client?
---------------------------------------
The AWS SDK for PHP utilizes service descriptions and dynamic
`magic __call() methods <http://www.php.net/manual/en/language.oop5.overloading.php#object.call>`_ to execute API
operations. Every magic method supported by a client is documented in the docblock of a client class using ``@method``
annotations. Several PHP IDEs, including `PHPStorm <http://www.jetbrains.com/phpstorm/>`_ and
`Zend Studio <http://www.zend.com/en/products/studio/>`_, are able to autocomplete based on ``@method`` annotations.
You can find a full list of methods available for a web service client in the
`API documentation <http://docs.aws.amazon.com/aws-sdk-php/latest/index.html>`_ of the client or in the
`user guide <http://docs.aws.amazon.com/aws-sdk-php/guide/latest/index.html>`_ for that client.
For example, the Amazon S3 client supports the following operations: :ref:`S3_operations`
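As a minimal sketch, the ``@method``-annotated operations are invoked as if they were ordinary instance methods:
.. code-block:: php
require 'vendor/autoload.php';
use Aws\S3\S3Client;
$s3 = S3Client::factory(array('region' => 'us-west-2'));
// listBuckets() is not defined on the class; the __call() magic method
// dispatches it to the ListBuckets operation in the service description
$result = $s3->listBuckets();
echo count($result['Buckets']) . " bucket(s)\n";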
What do I do about a cURL SSL certificate error?
------------------------------------------------
This issue can occur when using an out-of-date CA bundle with cURL and SSL. You
can get around this issue by updating the CA bundle on your server or downloading
a more up-to-date CA bundle directly from the `cURL website <http://curl.haxx.se/ca/cacert.pem>`_.
Download the newer CA bundle to somewhere on your system and instruct the
SDK to use that CA bundle rather than the default. You can configure the SDK to
use the newer CA bundle by specifying the ``ssl.certificate_authority`` option
in a client's factory method or in the configuration settings used with
``Aws\Common\Aws``.
.. code-block:: php
$aws = Aws\Common\Aws::factory(array(
'region' => 'us-west-2',
'key' => '****',
'secret' => '****',
'ssl.certificate_authority' => '/path/to/updated/cacert.pem'
));
You can find out more about how cURL bundles the CA bundle here: http://curl.haxx.se/docs/caextract.html
How do I disable SSL?
---------------------
.. warning::
Because SSL requires all data to be encrypted and requires more TCP packets than unencrypted HTTP to complete a
connection handshake, disabling SSL may provide a small performance improvement. However, with SSL disabled, all data
is sent over the wire unencrypted. Before disabling SSL, you must carefully consider the security implications and the
potential for eavesdropping over the network.
You can disable SSL by setting the ``scheme`` parameter in a client factory method to 'http'.
.. code-block:: php
$client = Aws\DynamoDb\DynamoDbClient::factory(array(
'region' => 'us-west-2',
'scheme' => 'http'
));
How can I make the SDK faster?
------------------------------
See :doc:`performance` for more information.
Why can't I upload or download files greater than 2GB?
------------------------------------------------------
Because PHP's integer type is signed and many platforms use 32-bit integers, the
AWS SDK for PHP does not correctly handle files larger than 2GB on a 32-bit stack
(where "stack" includes CPU, OS, web server, and PHP binary). This is a
`well-known PHP issue <http://www.google.com/search?q=php+2gb+32-bit>`_. In the
case of Microsoft® Windows®, there are no official builds of PHP that support
64-bit integers.
The recommended solution is to use a `64-bit Linux stack <http://aws.amazon.com/amazon-linux-ami/>`_,
such as the 64-bit Amazon Linux AMI with the latest version of PHP installed.
For more information, please see: `PHP filesize: Return values <http://docs.php.net/manual/en/function.filesize.php#refsect1-function.filesize-returnvalues>`_.
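A quick way to check whether your PHP binary uses 64-bit integers is a small sketch like the following (not part of the SDK):
.. code-block:: php
// PHP_INT_SIZE is 8 on a 64-bit build and 4 on a 32-bit build
if (PHP_INT_SIZE < 8) {
    echo "This PHP build uses 32-bit integers; files over 2GB will not be handled correctly.\n";
}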
How can I see what data is sent over the wire?
----------------------------------------------
You can attach a ``Guzzle\Plugin\Log\LogPlugin`` to any client to see all request and
response data sent over the wire. The LogPlugin works with any logger that implements
the ``Guzzle\Log\LogAdapterInterface`` interface (currently Monolog, ZF1, ZF2).
If you just want to quickly see what data is being sent over the wire, you can
simply attach a debug log plugin to your client.
.. code-block:: php
use Aws\S3\S3Client;
use Guzzle\Plugin\Log\LogPlugin;
// Create an Amazon S3 client
$s3Client = S3Client::factory();
// Add a debug log plugin
$s3Client->addSubscriber(LogPlugin::getDebugPlugin());
For more complex logging or logging to a file, you can build a LogPlugin manually.
.. code-block:: php
use Aws\S3\S3Client;
use Guzzle\Log\MonologLogAdapter;
use Guzzle\Plugin\Log\LogPlugin;
use Monolog\Logger;
use Monolog\Handler\StreamHandler;
// Create a log channel
$log = new Logger('aws');
$log->pushHandler(new StreamHandler('/path/to/your.log', Logger::WARNING));
// Create a log adapter for Monolog
$logger = new MonologLogAdapter($log);
// Create the LogPlugin
$logPlugin = new LogPlugin($logger);
// Create an Amazon S3 client
$s3Client = S3Client::factory();
// Add the LogPlugin to the client
$s3Client->addSubscriber($logPlugin);
You can find out more about the LogPlugin on the Guzzle website: http://guzzlephp.org/guide/plugins.html#log-plugin
How can I set arbitrary headers on a request?
---------------------------------------------
You can add any arbitrary headers to a service operation by setting the ``command.headers`` value. The following example
shows how to add an ``X-Foo-Baz`` header to an Amazon S3 PutObject operation.
.. code-block:: php
$s3Client = S3Client::factory();
$s3Client->putObject(array(
'Key' => 'test',
'Bucket' => 'mybucket',
'command.headers' => array(
'X-Foo-Baz' => 'Bar'
)
));
Does the SDK follow semantic versioning?
----------------------------------------
Yes. The SDK follows a semantic versioning scheme similar to but not the same as `semver <http://semver.org>`_.
Instead of the **MAJOR.MINOR.PATCH** scheme specified by semver, the SDK actually follows a scheme that looks like
**PARADIGM.MAJOR.MINOR** where:
1. The **PARADIGM** version number is incremented when **drastic, breaking changes** are made to the SDK, such that the
fundamental way of using the SDK is different. You are probably aware that version 1.x and version 2.x of the AWS SDK
for PHP are *very* different.
2. The **MAJOR** version number is incremented when **breaking changes** are made to the API. These are usually small
changes, and only occur when one of the services makes breaking changes to its API. Make sure to check the
`CHANGELOG <>`_ when these changes occur.
3. The **MINOR** version number is incremented when any **backwards-compatible** change is made, whether it's a new
feature or a bug fix.
The best way to ensure that you are not affected by breaking changes is to set your dependency on the SDK in Composer to
stay within a particular **PARADIGM.MAJOR** version. This can be done using the wildcard syntax:
.. code-block:: json
{
"require": {
"aws/aws-sdk-php": "2.4.*"
}
}
Or by using the tilde operator:
.. code-block:: json
{
"require": {
"aws/aws-sdk-php": "~2.4.9"
}
}
See the `Composer documentation <http://getcomposer.org/doc/01-basic-usage.md#package-versions>`_ for more information
on configuring your dependencies.
The SDK may at some point adopt the semver standard, but this will probably not happen until the next paradigm-type
change.

View File

@ -0,0 +1,292 @@
========================
DynamoDB Session Handler
========================
Introduction
------------
The **DynamoDB Session Handler** is a custom session handler for PHP that allows developers to use Amazon DynamoDB as a
session store. Using DynamoDB for session storage alleviates issues that occur with session handling in a distributed
web application by moving sessions off of the local file system and into a shared location. DynamoDB is fast, scalable,
easy to set up, and handles replication of your data automatically.
The DynamoDB Session Handler uses the ``session_set_save_handler()`` function to hook DynamoDB operations into PHP's
`native session functions <http://www.php.net/manual/en/ref.session.php>`_ to allow for a true drop-in replacement. This
includes support for features like session locking and garbage collection which are a part of PHP's default session
handler.
For more information on the Amazon DynamoDB service, please visit the `Amazon DynamoDB homepage
<http://aws.amazon.com/dynamodb>`_.
Basic Usage
-----------
1. Register the handler
~~~~~~~~~~~~~~~~~~~~~~~
The first step is to instantiate the Amazon DynamoDB client and register the session handler.
.. code-block:: php
require 'vendor/autoload.php';
use Aws\DynamoDb\DynamoDbClient;
$dynamoDb = DynamoDbClient::factory(array(
'key' => '<aws access key>',
'secret' => '<aws secret key>',
'region' => '<region name>'
));
$sessionHandler = $dynamoDb->registerSessionHandler(array(
'table_name' => 'sessions'
));
You can also instantiate the ``SessionHandler`` object directly using its ``factory`` method.
.. code-block:: php
require 'vendor/autoload.php';
use Aws\DynamoDb\DynamoDbClient;
use Aws\DynamoDb\Session\SessionHandler;
$dynamoDb = DynamoDbClient::factory(array(
'key' => '<aws access key>',
'secret' => '<aws secret key>',
'region' => '<region name>',
));
$sessionHandler = SessionHandler::factory(array(
'dynamodb_client' => $dynamoDb,
'table_name' => 'sessions',
));
$sessionHandler->register();
2. Create a table for storing your sessions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Before you can actually use the session handler, you need to create a table in which to store the sessions. This can be
done ahead of time through the `AWS Console for Amazon DynamoDB <https://console.aws.amazon.com/dynamodb/home>`_, or you
can use the session handler object (which you've already configured with the table name) by doing the following:
.. code-block:: php
$sessionHandler->createSessionsTable(5, 5);
The two parameters for this function are used to specify the read and write provisioned throughput for the table,
respectively.
.. note::
The ``createSessionsTable`` function uses the ``TableExists`` :doc:`waiter <feature-waiters>` internally, so this
function call will block until the table exists and is ready to be used.
3. Use PHP sessions like normal
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Once the session handler is registered and the table exists, you can write to and read from the session using the
``$_SESSION`` superglobal, just like you normally do with PHP's default session handler. The DynamoDB Session Handler
encapsulates and abstracts the interactions with Amazon DynamoDB and enables you to simply use PHP's native session
functions and interface.
.. code-block:: php
// Start the session
session_start();
// Alter the session data
$_SESSION['user.name'] = 'jeremy';
$_SESSION['user.role'] = 'admin';
// Close the session (optional, but recommended)
session_write_close();
Configuration
-------------
You may configure the behavior of the session handler using the following options. All options are optional, but you
should make sure to understand what the defaults are.
============================ ===========================================================================================
``table_name`` The name of the DynamoDB table in which to store the sessions. This defaults to ``sessions``.
---------------------------- -------------------------------------------------------------------------------------------
``hash_key`` The name of the hash key in the DynamoDB sessions table. This defaults to ``id``.
---------------------------- -------------------------------------------------------------------------------------------
``session_lifetime`` The lifetime of an inactive session before it should be garbage collected. If it is not
provided, then the actual lifetime value that will be used is
``ini_get('session.gc_maxlifetime')``.
---------------------------- -------------------------------------------------------------------------------------------
``consistent_read`` Whether or not the session handler should use consistent reads for the ``GetItem``
operation. This defaults to ``true``.
---------------------------- -------------------------------------------------------------------------------------------
``locking_strategy`` The strategy used for doing session locking. By default the handler uses the
``NullLockingStrategy``, which means that session locking is **not** enabled (see the
:ref:`ddbsh-session-locking` section for more information). Valid values for this option
include null, 'null', 'pessimistic', or an instance of ``NullLockingStrategy`` or
``PessimisticLockingStrategy``.
---------------------------- -------------------------------------------------------------------------------------------
``automatic_gc`` Whether or not to use PHP's session auto garbage collection. This defaults to the value of
``(bool) ini_get('session.gc_probability')``, but the recommended value is ``false`` (see
the :ref:`ddbsh-garbage-collection` section for more information).
---------------------------- -------------------------------------------------------------------------------------------
``gc_batch_size`` The batch size used for removing expired sessions during garbage collection. This defaults
to ``25``, which is the maximum size of a single ``BatchWriteItem`` operation. This value
should also take your provisioned throughput into account as well as the timing of your
garbage collection.
---------------------------- -------------------------------------------------------------------------------------------
``gc_operation_delay`` The delay (in seconds) between service operations performed during garbage collection. This
defaults to ``0``. Increasing this value allows you to throttle your own requests in an
attempt to stay within your provisioned throughput capacity during garbage collection.
---------------------------- -------------------------------------------------------------------------------------------
``max_lock_wait_time`` Maximum time (in seconds) that the session handler should wait to acquire a lock before
giving up. This defaults to ``10`` and is only used with the ``PessimisticLockingStrategy``.
---------------------------- -------------------------------------------------------------------------------------------
``min_lock_retry_microtime`` Minimum time (in microseconds) that the session handler should wait between attempts
to acquire a lock. This defaults to ``10000`` and is only used with the
``PessimisticLockingStrategy``.
---------------------------- -------------------------------------------------------------------------------------------
``max_lock_retry_microtime`` Maximum time (in microseconds) that the session handler should wait between attempts
to acquire a lock. This defaults to ``50000`` and is only used with the
``PessimisticLockingStrategy``.
---------------------------- -------------------------------------------------------------------------------------------
``dynamodb_client`` The ``DynamoDbClient`` object that should be used for performing DynamoDB operations. If
you register the session handler from a client object using the ``registerSessionHandler()``
method, this will default to the client you are registering it from. If using the
``SessionHandler::factory()`` method, you are required to provide an instance of
``DynamoDbClient``.
============================ ===========================================================================================
To configure the Session Handler, you must specify the configuration options when you instantiate the handler. The
following code is an example with all of the configuration options specified.
.. code-block:: php
$sessionHandler = $dynamoDb->registerSessionHandler(array(
'table_name' => 'sessions',
'hash_key' => 'id',
'session_lifetime' => 3600,
'consistent_read' => true,
'locking_strategy' => null,
'automatic_gc' => 0,
'gc_batch_size' => 50,
'max_lock_wait_time' => 15,
'min_lock_retry_microtime' => 5000,
'max_lock_retry_microtime' => 50000,
));
Pricing
-------
Aside from data storage and data transfer fees, the costs associated with using Amazon DynamoDB are calculated based on
the provisioned throughput capacity of your table (see the `Amazon DynamoDB pricing details
<http://aws.amazon.com/dynamodb/#pricing>`_). Throughput is measured in units of Write Capacity and Read Capacity. The
Amazon DynamoDB homepage says:
A unit of Write Capacity enables you to perform one write per second for items of up to 1KB in size. Similarly, a
unit of Read Capacity enables you to perform one strongly consistent read per second (or two eventually consistent
reads per second) of items of up to 1KB in size. Larger items will require more capacity. You can calculate the
number of units of read and write capacity you need by estimating the number of reads or writes you need to do per
second and multiplying by the size of your items (rounded up to the nearest KB).
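As a worked sketch with assumed numbers: if you expect 25 session writes per second and your sessions average 1.5KB, each write rounds up to 2KB, so you would provision roughly 25 × 2 = 50 units of Write Capacity.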
Ultimately, the throughput and costs required for your sessions table are going to correlate with your expected
traffic and session size. The following table explains the number of read and write operations that are performed on
your DynamoDB table for each of the session functions.
+----------------------------------------+-----------------------------------------------------------------------------+
| Read via ``session_start()`` | * 1 read operation (only 0.5 if ``consistent_read`` is ``false``). |
| (Using ``NullLockingStrategy``) | * (Conditional) 1 write operation to delete the session if it is expired. |
+----------------------------------------+-----------------------------------------------------------------------------+
| Read via ``session_start()`` | * A minimum of 1 *write* operation. |
| (Using ``PessimisticLockingStrategy``) | * (Conditional) Additional write operations for each attempt at acquiring a |
| | lock on the session. Based on configured lock wait time and retry options.|
| | * (Conditional) 1 write operation to delete the session if it is expired. |
+----------------------------------------+-----------------------------------------------------------------------------+
| Write via ``session_write_close()`` | * 1 write operation. |
+----------------------------------------+-----------------------------------------------------------------------------+
| Delete via ``session_destroy()`` | * 1 write operation. |
+----------------------------------------+-----------------------------------------------------------------------------+
| Garbage Collection | * 0.5 read operations **per KB of data in the table** to scan for expired |
| | sessions. |
| | * 1 write operation **per expired item** to delete it. |
+----------------------------------------+-----------------------------------------------------------------------------+
.. _ddbsh-session-locking:
Session Locking
---------------
The DynamoDB Session Handler supports pessimistic session locking in order to mimic the behavior of PHP's default
session handler. By default the DynamoDB Session Handler has this feature *turned off* since it can become a performance
bottleneck and drive up costs, especially when an application accesses the session from concurrent Ajax requests or iframes.
You should carefully consider whether your application requires session locking before enabling it.
By default the session handler uses the ``NullLockingStrategy`` which does not do any session locking. To enable session
locking, you should use the ``PessimisticLockingStrategy``, which can be specified when the session handler is created.
.. code-block:: php
$sessionHandler = $dynamoDb->registerSessionHandler(array(
'table_name' => 'sessions',
'locking_strategy' => 'pessimistic',
));
.. _ddbsh-garbage-collection:
Garbage Collection
------------------
The DynamoDB Session Handler supports session garbage collection by using a series of ``Scan`` and ``BatchWriteItem``
operations. Due to the nature of how the ``Scan`` operation works, finding all of the expired sessions and
deleting them can require a lot of provisioned throughput.
For this reason, it is discouraged to rely on PHP's normal session garbage collection triggers (i.e., the
``session.gc_probability`` and ``session.gc_divisor`` ini settings). A better practice is to set
``session.gc_probability`` to ``0`` and schedule the garbage collection to occur during an off-peak time when a
burst of consumed throughput will not disrupt the rest of the application.
For example, you could have a nightly cron job trigger a script to run the garbage collection. This script might look
something like the following:
.. code-block:: php
require 'vendor/autoload.php';
use Aws\DynamoDb\DynamoDbClient;
use Aws\DynamoDb\Session\SessionHandler;
$dynamoDb = DynamoDbClient::factory(array(
'key' => '<aws access key>',
'secret' => '<aws secret key>',
'region' => '<region name>',
));
$sessionHandler = SessionHandler::factory(array(
'dynamodb_client' => $dynamoDb,
'table_name' => 'sessions',
));
$sessionHandler->garbageCollect();
You can also use the ``gc_operation_delay`` configuration option on the session handler to introduce delays in between
the ``Scan`` and ``BatchWriteItem`` operations that are performed by the garbage collection process. This will increase
the amount of time it takes the garbage collection to complete, but it can help you spread out the requests made by the
session handler so that you stay close to or within your provisioned throughput capacity during garbage
collection.
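For example (the delay value here is an assumption chosen purely to illustrate the option):
.. code-block:: php
$sessionHandler = SessionHandler::factory(array(
    'dynamodb_client' => $dynamoDb,
    'table_name'      => 'sessions',
    // Sleep one second between the Scan/BatchWriteItem operations
    'gc_operation_delay' => 1,
));
$sessionHandler->garbageCollect();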
Best Practices
--------------
#. Create your sessions table in a region that is geographically closest to or in the same region as your application
servers. This will ensure the lowest latency between your application and the DynamoDB database.
#. Choose the provisioned throughput capacity of your sessions table carefully, taking into account the expected traffic
to your application and the expected size of your sessions.
#. Monitor your consumed throughput through the AWS Management Console or with Amazon CloudWatch and adjust your
throughput settings as needed to meet the demands of your application.
#. Keep the size of your sessions small. Sessions that are less than 1KB will perform better and require less
provisioned throughput capacity.
#. Do not use session locking unless your application requires it.
#. Instead of using PHP's built-in session garbage collection triggers, schedule your garbage collection via a cron job,
or another scheduling mechanism, to run during off-peak hours. Use the ``gc_operation_delay`` option to add delays
in between the requests performed for the garbage collection process.

View File

@ -0,0 +1,170 @@
=====================
Static Client Facades
=====================
Introduction
------------
Version 2.4 of the AWS SDK for PHP adds the ability to enable and use static client facades. These "facades" provide an
easy, static interface to service clients available in the service builder. For example, when working with a normal
client instance, you might have code that looks like the following:
.. code-block:: php
// Get the configured S3 client from the service builder
$s3 = $aws->get('s3');
// Execute the CreateBucket command using the S3 client
$s3->createBucket(array('Bucket' => 'your-new-bucket-name'));
With client facades enabled, this can also be accomplished with the following code:
.. code-block:: php
// Execute the CreateBucket command using the S3 client
S3::createBucket(array('Bucket' => 'your-new-bucket-name'));
Why Use Client Facades?
-----------------------
The use of static client facades is completely optional. We have included this feature in the SDK in order to appeal to
PHP developers who prefer static notation or who are familiar with PHP frameworks like CodeIgniter, Laravel, or Kohana
where this style of method invocation is common.
Though using static client facades has little real benefit over using client instances, it can make your code more
concise and prevent you from having to inject the service builder or client instance into the context where you
need the client object. This can make your code easier to write and understand. Whether or not you should use the client
facades is purely a matter of preference.
The way in which client facades work in the AWS SDK for PHP is similar to how `facades work in the Laravel 4
Framework <http://laravel.com/docs/facades>`_. Even though you are calling static classes, all of the method calls are
proxied to method calls on actual client instances — the ones stored in the service builder. This means that the usage
of the clients via the client facades can still be mocked in your unit tests, which removes one of the general
disadvantages to using static classes in object-oriented programming. For information about how to test code that uses
client facades, please see the **Testing Code that Uses Client Facades** section below.
Enabling and Using Client Facades
---------------------------------
To enable static client facades to be used in your application, you must use the ``Aws\Common\Aws::enableFacades``
method when you set up the service builder.
.. code-block:: php
// Include the Composer autoloader
require 'vendor/autoload.php';
// Instantiate the SDK service builder with my config and enable facades
$aws = Aws::factory('/path/to/my_config.php')->enableFacades();
This will set up the client facades and alias them into the global namespace. After that, you can use them anywhere to
write simpler, more expressive code for interacting with AWS services.
.. code-block:: php
// List current buckets
echo "Current Buckets:\n";
foreach (S3::getListBucketsIterator() as $bucket) {
echo "{$bucket['Name']}\n";
}
$args = array('Bucket' => 'your-new-bucket-name');
$file = '/path/to/the/file/to/upload.jpg';
// Create a new bucket and wait until it is available for uploads
S3::createBucket($args) and S3::waitUntilBucketExists($args);
echo "\nCreated a new bucket: {$args['Bucket']}.\n";
// Upload a file to the new bucket
$result = S3::putObject($args + array(
'Key' => basename($file),
'Body' => fopen($file, 'r'),
));
echo "\nCreated a new object: {$result['ObjectURL']}\n";
You can also mount the facades into a namespace other than the global namespace. For example, if you wanted to make the
client facades available in the "Services" namespace, then you could do the following:
.. code-block:: php
Aws::factory('/path/to/my_config.php')->enableFacades('Services');
$result = Services\DynamoDb::listTables();
The client facades that are available are determined by what is in your service builder configuration (see
:doc:`configuration`). If you are extending the SDK's default configuration file or not providing one at all, then all
of the clients should be accessible from the service builder instance and client facades (once enabled) by default.
Based on the following excerpt from the default configuration file (located at
``src/Aws/Common/Resources/aws-config.php``):
.. code-block:: php
's3' => array(
'alias' => 'S3',
'extends' => 'default_settings',
'class' => 'Aws\S3\S3Client'
),
The ``'class'`` key indicates the client class that the static client facade will proxy to, and the ``'alias'`` key
indicates what the client facade will be named. Only entries in the service builder config that have both the
``'alias'`` and ``'class'`` keys specified will be mounted as static client facades. You can potentially update or add
to your service builder config to alter or create new or custom client facades.
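For example, a hypothetical entry that mounts a second, separately configured S3 client under its own facade name (the ``s3.backups`` key and ``BackupS3`` alias are assumptions, not part of the default config):
.. code-block:: php
's3.backups' => array(
    'extends' => 's3',
    'alias'   => 'BackupS3', // mounted as the BackupS3 facade
    'class'   => 'Aws\S3\S3Client',
    'params'  => array(
        'key'    => 'your-aws-access-key-id',
        'secret' => 'your-aws-secret-access-key',
        'region' => 'us-west-2'
    )
),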
Testing Code that Uses Client Facades
-------------------------------------
With the static client facades in the SDK, even though you are calling static classes, all of the method calls are
proxied to method calls on actual client instances — the ones stored in the service builder. This means that they can
be mocked during tests, which removes one of the general disadvantages to using static classes in object-oriented
programming.
To mock a client facade for a test, you can explicitly set a mocked client object for the key in the service builder
that would normally contain the client referenced by the client facade. Here is a complete, but contrived, PHPUnit test
showing how this is done:
.. code-block:: php
<?php
use Aws\Common\Aws;
use Guzzle\Service\Resource\Model;
use YourApp\Things\FileBrowser;
class SomeKindOfFileBrowserTest extends PHPUnit_Framework_TestCase
{
private $serviceBuilder;
public function setUp()
{
$this->serviceBuilder = Aws::factory();
$this->serviceBuilder->enableFacades();
}
public function testCanDoSomethingWithYourAppsFileBrowserClass()
{
// Mock the ListBuckets method of S3 client
$mockS3Client = $this->getMockBuilder('Aws\S3\S3Client')
->disableOriginalConstructor()
->getMock();
$mockS3Client->expects($this->any())
->method('listBuckets')
->will($this->returnValue(new Model(array(
'Buckets' => array(
array('Name' => 'foo'),
array('Name' => 'bar'),
array('Name' => 'baz')
)
))));
$this->serviceBuilder->set('s3', $mockS3Client);
// Test the FileBrowser object that uses the S3 client facade internally
$fileBrowser = new FileBrowser();
$partitions = $fileBrowser->getPartitions();
$this->assertEquals(array('foo', 'bar', 'baz'), $partitions);
}
}
Alternatively, if you are specifically only mocking responses from clients, you might consider using the `Guzzle Mock
Plugin <http://guzzlephp.org/plugins/mock-plugin.html>`_.
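A minimal sketch of that approach (the canned empty 200 response is an assumption):
.. code-block:: php
use Guzzle\Plugin\Mock\MockPlugin;
use Guzzle\Http\Message\Response;
$mock = new MockPlugin();
// Queue a canned 200 response for the next request the client would send
$mock->addResponse(new Response(200));
$s3Client->addSubscriber($mock);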

View File

@ -0,0 +1,108 @@
=========
Iterators
=========
Introduction
------------
.. include:: _snippets/iterators-intro.txt
The "``get[…]Iterator``" methods are all implemented via the ``__call`` magic method, and are a more discoverable
shortcut to using the concrete ``getIterator()`` method, since many IDEs can auto-complete methods defined using the
``@method`` annotation. The following code uses the ``getIterator()`` method, but is equivalent to the previous code
sample.
.. code-block:: php
$iterator = $client->getIterator('ListObjects', array('Bucket' => 'my-bucket'));
foreach ($iterator as $object) {
echo $object['Key'] . "\n";
}
The ``getIterator()`` method also accepts a command object for the first argument. If you have a command object already
instantiated, you can create an iterator directly from the command object.
.. code-block:: php
$command = $client->getCommand('ListObjects', array('Bucket' => 'my-bucket'));
$iterator = $client->getIterator($command);
Iterator Objects
----------------
The actual object returned by ``getIterator()``, and any ``get[…]Iterator()`` method, is an instance of the
``Aws\Common\Iterator\AwsResourceIterator`` class (see the
`API docs <http://docs.aws.amazon.com/aws-sdk-php/latest/class-Aws.Common.Iterator.AwsResourceIterator.html>`_ for
more information about its methods and properties). This class implements PHP's native ``Iterator`` interface, which is
why it works with ``foreach``, can be used with iterator functions like ``iterator_to_array``, and integrates well with
`SPL iterators <http://www.php.net/manual/en/spl.iterators.php>`_ like ``LimitIterator``.
Iterator objects only store one "page" of results at a time and only make as many requests as they need based on the
current iteration. The S3 ``ListObjects`` operation only returns up to 1000 objects at a time. If your bucket has ~10000
objects, then the iterator would need to do 10 requests. However, it does not execute the subsequent requests until
needed. If you are iterating through the results, the first request would happen when you start iterating, and the
second request would not happen until you iterate to the 1001st object. This can help your application save memory by
only holding one page of results at a time.
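Because the iterator implements PHP's native ``Iterator`` interface, it also composes with the SPL iterators mentioned above; a small sketch using ``LimitIterator``:
.. code-block:: php
$iterator = $client->getIterator('ListObjects', array('Bucket' => 'my-bucket'));
// Take only the first five results; no extra pages are requested
foreach (new LimitIterator($iterator, 0, 5) as $object) {
    echo $object['Key'] . "\n";
}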
Basic Configuration
-------------------
Iterators accept an extra set of parameters that are not passed into the commands. You can set a limit on the number of
results you want with the ``limit`` parameter, and you can control how many results you want to get back per request
using the ``page_size`` parameter. If no ``limit`` is specified, then all results are retrieved. If no ``page_size`` is
specified, then the iterator will use the maximum page size allowed by the operation being executed.
The following example will make 10 Amazon S3 ``ListObjects`` requests (assuming there are more than 1000 objects in the
specified bucket) that each return up to 100 objects. The ``foreach`` loop will yield up to 999 objects.
.. code-block:: php
$iterator = $client->getListObjectsIterator(array(
'Bucket' => 'my-bucket'
), array(
'limit' => 999,
'page_size' => 100
));
foreach ($iterator as $object) {
echo $object['Key'] . "\n";
}
There are some limitations to the ``limit`` and ``page_size`` parameters though. Not all operations support specifying
a page size or limit, so the iterator will do its best with what you provide. For example, if an operation always
returns 1000 results, and you specify a limit of 100, the iterator will only yield 100 results, even though the actual
request sent to the service yielded 1000.
Iterator Events
---------------
Iterators emit 2 kinds of events:
1. ``resource_iterator.before_send`` - Emitted right before a request is sent to retrieve results.
2. ``resource_iterator.after_send`` - Emitted right after a request is sent to retrieve results.
Iterator objects extend the ``Guzzle\Common\AbstractHasDispatcher`` class which exposes the ``addSubscriber()`` method
and the ``getEventDispatcher()`` method. To attach listeners, you can use the following example which echoes a message
right before and after a request is executed by the iterator.
.. code-block:: php
$iterator = $client->getListObjectsIterator(array(
'Bucket' => 'my-bucket'
));
// Get the event dispatcher and register listeners for both events
$dispatcher = $iterator->getEventDispatcher();
$dispatcher->addListener('resource_iterator.before_send', function ($event) {
echo "Getting more results…\n";
});
$dispatcher->addListener('resource_iterator.after_send', function ($event) use ($iterator) {
$requestCount = $iterator->getRequestCount();
echo "Results received. {$requestCount} request(s) made so far.\n";
});
foreach ($iterator as $object) {
echo $object['Key'] . "\n";
}

View File

@ -0,0 +1,163 @@
===============
Response Models
===============
Introduction
------------
.. include:: _snippets/models-intro.txt
Working with Model Objects
--------------------------
Model objects (and Command objects) inherit from the `Guzzle Collection class
<http://docs.aws.amazon.com/aws-sdk-php/latest/class-Guzzle.Common.Collection.html>`_ and implement PHP's native
``ArrayAccess``, ``IteratorAggregate``, and ``Countable`` interfaces. This means that they behave like arrays when you
are accessing keys and iterating over key-value pairs. You can also use the ``toArray()`` method of the Model object to
get the array form directly.
However, model objects will not throw errors on undefined keys, so it's safe to use values directly without doing
``isset()`` checks. If the key doesn't exist, then the value will be returned as ``null``.
.. code-block:: php
// Using a value that may not exist
if (!$result['ContentLength']) {
echo "Empty file.";
}
$isDeleted = (bool) $result->get('DeleteMarker');
Of course, you can still use ``isset()`` checks if you want to, since ``Model`` does implement ``ArrayAccess``. The
model object (and underlying Collection object) also has convenience methods for finding and checking for keys and
values.
.. code-block:: php
// You can use isset() since the object implements ArrayAccess
if (!isset($result['ContentLength'])) {
echo "Empty file.";
}
// There is also a method that does the same type of check
if (!$result->hasKey('ContentLength')) {
echo "Empty file.";
}
// If needed, you can search for a key in a case-insensitive manner
echo $result->keySearch('body');
//> Body
echo $result->keySearch('Body');
//> Body
// You can also list all of the keys in the result
var_export($result->getKeys());
//> array ( 'Body', 'DeleteMarker', 'Expiration', 'ContentLength', ... )
// The getAll() method will return the result data as an array
// You can specify a set of keys to only get a subset of the data
var_export($result->getAll(array('Body', 'ContentLength')));
//> array ( 'Body' => 'Hello!' , 'ContentLength' => 6 )
Getting Nested Values
~~~~~~~~~~~~~~~~~~~~~
The ``getPath()`` method of the model is useful for easily getting nested values from a response. The path is specified
as a series of keys separated by slashes.
.. code-block:: php
// Perform a RunInstances operation and traverse into the results to get the InstanceId
$result = $ec2Client->runInstances(array(
'ImageId' => 'ami-548f13d',
'MinCount' => 1,
'MaxCount' => 1,
'InstanceType' => 't1.micro',
));
$instanceId = $result->getPath('Instances/0/InstanceId');
Wildcards are also supported so that you can extract an array of data. The following example is a modification of
the preceding one, such that multiple InstanceIds can be retrieved.
.. code-block:: php
// Perform a RunInstances operation and get an array of the InstanceIds that were created
$result = $ec2Client->runInstances(array(
'ImageId' => 'ami-548f13d',
'MinCount' => 3,
'MaxCount' => 5,
'InstanceType' => 't1.micro',
));
$instanceId = $result->getPath('Instances/*/InstanceId');
Using Data in the Model
-----------------------
Response Models contain the parsed data from the response of a service operation, so the contents of the model will
be different depending on which operation you've performed.
The SDK's API docs are the best resource for discovering what the model object will contain for a given operation. The
API docs contain a full specification of the data in the response model under the *Returns* section of the docs for an
operation (e.g., `S3 GetObject operation <http://docs.aws.amazon.com/aws-sdk-php/latest/class-Aws.S3.S3Client.html#_getObject>`_,
`EC2 RunInstances operation <http://docs.aws.amazon.com/aws-sdk-php/latest/class-Aws.Ec2.Ec2Client.html#_runInstances>`_).
From within your code you can convert the response model directly into an array using the ``toArray()`` method. If you
are doing some debugging in your code, you could use ``toArray()`` in conjunction with ``print_r()`` to print out a
simple representation of the response data.
.. code-block:: php
$result = $ec2Client->runInstances(array(/* ... */));
print_r($result->toArray());
You can also examine the service description for a service, which is located in the ``Resources`` directory within a
given client's namespace directory. For example, here is a snippet from the SQS service description (located in
``src/Aws/Sqs/Resources/``) that shows the schema for the response of the ``SendMessage`` operation.
.. code-block:: php
'SendMessageResult' => array(
'type' => 'object',
'additionalProperties' => true,
'properties' => array(
'MD5OfMessageBody' => array(
'description' => 'An MD5 digest of the non-URL-encoded message body string. This can be used to verify that SQS received the message correctly. SQS first URL decodes the message before creating the MD5 digest. For information about MD5, go to http://faqs.org/rfcs/rfc1321.html.',
'type' => 'string',
'location' => 'xml',
),
'MessageId' => array(
'description' => 'The message ID of the message added to the queue.',
'type' => 'string',
'location' => 'xml',
),
),
),
Lastly, if you are familiar with Guzzle and Guzzle's service description classes, you can also get the parameter
structures from the model object by calling ``$result->getStructure()`` (see the `API docs for the getStructure method
<http://docs.aws.amazon.com/aws-sdk-php/latest/class-Guzzle.Service.Resource.Model.html#_getStructure>`_).
Getting Response Headers
------------------------
The ``Response`` object is not directly accessible from the ``Model`` object. If you are interested in getting header
values, the status code, or other data from the response you will need to get the ``Response`` object from the
``Command`` object (see the :ref:`qs-executing-commands` section of the **Quick Start** guide). You may need to switch
from using the shorthand command syntax to the expanded syntax so that the command object can be accessed directly.
.. code-block:: php
// Getting the response Model with the shorthand syntax
$result = $s3Client->createBucket(array(/* ... */));
// Getting the response Model with the expanded syntax
$command = $s3Client->getCommand('CreateBucket', array(/* ... */));
$result = $command->getResult();
// Getting the Response object from the Command
$response = $command->getResponse();
$response->getHeader('Content-Length');
$response->getStatusCode();
In some cases, particularly with REST-like services like Amazon S3 and Amazon Glacier, most of the important headers are
already included in the response model.

View File

@ -0,0 +1,183 @@
=======
Waiters
=======
Introduction
------------
.. include:: _snippets/waiters-intro.txt
If the waiter has to poll the bucket too many times, it will throw an ``Aws\Common\Exception\RuntimeException``
exception.
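If you would rather handle that case than let the exception propagate, you can wrap the call; a minimal sketch:
.. code-block:: php
use Aws\Common\Exception\RuntimeException;
try {
    $client->waitUntilBucketExists(array('Bucket' => 'my-bucket'));
} catch (RuntimeException $e) {
    // The wait condition was not met within the configured attempts
    echo 'Gave up waiting: ' . $e->getMessage() . "\n";
}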
The "``waitUntil[…]``" methods are all implemented via the ``__call`` magic method, and are a more discoverable shortcut
to using the concrete ``waitUntil()`` method, since many IDEs can auto-complete methods defined using the ``@method``
annotation. The following code uses the ``waitUntil()`` method, but is equivalent to the previous code sample.
.. code-block:: php
$client->waitUntil('BucketExists', array('Bucket' => 'my-bucket'));
Basic Configuration
-------------------
You can tune the number of polling attempts issued by a waiter or the number of seconds to delay between each poll by
passing optional values prefixed with "waiter.":
.. code-block:: php
$client->waitUntilBucketExists(array(
'Bucket' => 'my-bucket',
'waiter.interval' => 10,
'waiter.max_attempts' => 3
));
Waiter Objects
--------------
To interact with the waiter object directly, you must use the ``getWaiter()`` method. The following code is equivalent
to the example in the preceding section.
.. code-block:: php
$bucketExistsWaiter = $client->getWaiter('BucketExists')
->setConfig(array('Bucket' => 'my-bucket'))
->setInterval(10)
->setMaxAttempts(3);
$bucketExistsWaiter->wait();
Waiter Events
-------------
One benefit of working directly with the waiter object is that you can attach event listeners. Waiters emit up to two
events in each **wait cycle**. A wait cycle does the following:
#. Dispatch the ``waiter.before_attempt`` event.
#. Attempt to resolve the wait condition by making a request to the service and checking the result.
#. If the wait condition is resolved, the wait cycle exits. If ``max_attempts`` is reached, an exception is thrown.
#. Dispatch the ``waiter.before_wait`` event.
#. Sleep ``interval`` amount of seconds.
Waiter objects extend the ``Guzzle\Common\AbstractHasDispatcher`` class which exposes the ``addSubscriber()`` method and
``getEventDispatcher()`` method. To attach listeners, you can use the following example, which is a modified version of
the previous one.
.. code-block:: php
// Get and configure the waiter object
$waiter = $client->getWaiter('BucketExists')
->setConfig(array('Bucket' => 'my-bucket'))
->setInterval(10)
->setMaxAttempts(3);
// Get the event dispatcher and register listeners for both events emitted by the waiter
$dispatcher = $waiter->getEventDispatcher();
$dispatcher->addListener('waiter.before_attempt', function () {
echo "Checking if the wait condition has been met…\n";
});
$dispatcher->addListener('waiter.before_wait', function () use ($waiter) {
$interval = $waiter->getInterval();
echo "Sleeping for {$interval} seconds…\n";
});
$waiter->wait();
Custom Waiters
--------------
It is possible to implement custom waiter objects if your use case requires application-specific waiter logic or waiters
that are not yet supported by the SDK. You can use the ``getWaiterFactory()`` and ``setWaiterFactory()`` methods on the
client to manipulate the waiter factory used by the client such that your custom waiter can be instantiated. By default
the service clients use an ``Aws\Common\Waiter\CompositeWaiterFactory``, which allows you to add additional factories if
needed. The following example shows how to implement a contrived custom waiter class and then modify a client's waiter
factory such that it can create instances of the custom waiter.
.. code-block:: php
namespace MyApp\FakeWaiters
{
use Aws\Common\Waiter\AbstractResourceWaiter;
class SleptThreeTimes extends AbstractResourceWaiter
{
public function doWait()
{
if ($this->attempts < 3) {
echo "Need to sleep…\n";
return false;
} else {
echo "Now I've slept 3 times.\n";
return true;
}
}
}
}
namespace
{
use Aws\S3\S3Client;
use Aws\Common\Waiter\WaiterClassFactory;
$client = S3Client::factory();
$compositeFactory = $client->getWaiterFactory();
$compositeFactory->addFactory(new WaiterClassFactory('MyApp\FakeWaiters'));
$waiter = $client->waitUntilSleptThreeTimes();
}
The result of this code should look like the following::
Need to sleep…
Need to sleep…
Need to sleep…
Now I've slept 3 times.
Waiter Definitions
------------------
The waiters that are included in the SDK are defined in the service description for their client. They are defined
using a configuration DSL (domain-specific language) that describes the default wait intervals, wait conditions, and
how to check or poll the resource to resolve the condition.
This data is automatically consumed and used by the ``Aws\Common\Waiter\WaiterConfigFactory`` class when a client is
instantiated so that the waiters defined in the service description are available to the client.
The following is an excerpt of the Amazon Glacier service description that defines the waiters provided by
``Aws\Glacier\GlacierClient``.
.. code-block:: php
return array(
// ...
'waiters' => array(
'__default__' => array(
'interval' => 3,
'max_attempts' => 15,
),
'__VaultState' => array(
'operation' => 'DescribeVault',
),
'VaultExists' => array(
'extends' => '__VaultState',
'success.type' => 'output',
'description' => 'Wait until a vault can be accessed.',
'ignore_errors' => array(
'ResourceNotFoundException',
),
),
'VaultNotExists' => array(
'extends' => '__VaultState',
'description' => 'Wait until a vault is deleted.',
'success.type' => 'error',
'success.value' => 'ResourceNotFoundException',
),
),
// ...
);
To contribute waiters to the SDK, you must implement them using the waiters DSL. The DSL is not yet documented, since
it is still subject to change, so if you are interested in helping to implement more waiters, please reach out to us
via `GitHub <https://github.com/aws/aws-sdk-php/issues>`_.

125
vendor/aws/aws-sdk-php/docs/index.rst vendored Normal file
View File

@ -0,0 +1,125 @@
===============
AWS SDK for PHP
===============
.. toctree::
:hidden:
awssignup
requirements
installation
quick-start
configuration
performance
faq
migration-guide
side-by-side
service-autoscaling
service-cloudformation
service-cloudfront
service-cloudsearch
service-cloudtrail
service-cloudwatch
service-datapipeline
service-directconnect
service-dynamodb
service-dynamodb-20111205
service-ec2
service-elasticache
service-elasticbeanstalk
service-elasticloadbalancing
service-elastictranscoder
service-emr
service-glacier
service-iam
service-importexport
service-opsworks
service-rds
service-redshift
service-route53
service-s3
service-ses
service-simpledb
service-sns
service-sqs
service-storagegateway
service-sts
service-support
service-swf
feature-dynamodb-session-handler
feature-waiters
feature-models
feature-facades
The **AWS SDK for PHP** enables PHP developers to easily interface with AWS services and build solutions with Amazon
Simple Storage Service (Amazon S3), Amazon DynamoDB, Amazon Glacier, and more. With the AWS SDK for PHP, developers can
get started in minutes by using Composer — by requiring the ``aws/aws-sdk-php`` package — or by downloading a single
`zip <http://pear.amazonwebservices.com/get/aws.zip>`_ or `phar <http://pear.amazonwebservices.com/get/aws.phar>`_ file.
Getting started
---------------
* :doc:`awssignup`
* :doc:`requirements`
* :doc:`installation`
* :doc:`quick-start`
* :doc:`configuration`
* :doc:`performance`
* :doc:`faq`
* `Contributing to the SDK <https://github.com/aws/aws-sdk-php/blob/master/CONTRIBUTING.md>`_
Migrating from SDK 1 to SDK 2
-----------------------------
* :doc:`migration-guide`
* :doc:`side-by-side`
.. _supported-services:
Supported services
------------------
* :doc:`service-autoscaling`
* :doc:`service-cloudformation`
* :doc:`service-cloudfront`
* :doc:`service-cloudsearch`
* :doc:`service-cloudtrail`
* :doc:`service-cloudwatch`
* :doc:`service-datapipeline`
* :doc:`service-directconnect`
* :doc:`service-dynamodb`
* :doc:`service-dynamodb-20111205`
* :doc:`service-ec2`
* :doc:`service-elasticache`
* :doc:`service-elasticbeanstalk`
* :doc:`service-elasticloadbalancing`
* :doc:`service-elastictranscoder`
* :doc:`service-emr`
* :doc:`service-glacier`
* :doc:`service-iam`
* :doc:`service-importexport`
* :doc:`service-opsworks`
* :doc:`service-rds`
* :doc:`service-redshift`
* :doc:`service-route53`
* :doc:`service-s3`
* :doc:`service-ses`
* :doc:`service-simpledb`
* :doc:`service-sns`
* :doc:`service-sqs`
* :doc:`service-storagegateway`
* :doc:`service-sts`
* :doc:`service-support`
* :doc:`service-swf`
SDK features in detail
----------------------
* :doc:`feature-dynamodb-session-handler`
* :doc:`feature-iterators`
* :doc:`feature-waiters`
* :doc:`feature-models`
* :doc:`feature-facades`

View File

@ -0,0 +1,143 @@
============
Installation
============
Installing via Composer
-----------------------
Using `Composer <http://getcomposer.org>`_ is the recommended way to install the AWS SDK for PHP. Composer is a
dependency management tool for PHP that allows you to declare the dependencies your project needs and installs them into
your project. In order to use the SDK with Composer, you must do the following:
#. Add ``"aws/aws-sdk-php"`` as a dependency in your project's ``composer.json`` file.
.. code-block:: js
{
"require": {
"aws/aws-sdk-php": "2.*"
}
}
Consider tightening your dependencies to a known version (e.g., ``2.3.*``).
#. Download and install Composer.
.. code-block:: sh
curl -sS https://getcomposer.org/installer | php
#. Install your dependencies.
.. code-block:: sh
php composer.phar install
#. Require Composer's autoloader.
Composer prepares an autoload file that's capable of autoloading all of the classes in any of the libraries that
it downloads. To use it, just add the following line to your code's bootstrap process.
.. code-block:: php
require '/path/to/sdk/vendor/autoload.php';
You can find out more on how to install Composer, configure autoloading, and other best-practices for defining
dependencies at `getcomposer.org <http://getcomposer.org>`_.
During your development, you can keep up with the latest changes on the master branch by setting the version
requirement for the SDK to ``dev-master``.
.. code-block:: js
{
"require": {
"aws/aws-sdk-php": "dev-master"
}
}
Installing via Phar
-------------------
Each release of the AWS SDK for PHP ships with a pre-packaged `phar <http://php.net/manual/en/book.phar.php>`_ (PHP
archive) file containing all of the classes and dependencies you need to run the SDK. Additionally, the phar file
automatically registers a class autoloader for the AWS SDK for PHP and all of its dependencies when included. Bundled
with the phar file are the following required and suggested libraries:
- `Guzzle <https://github.com/guzzle/guzzle>`_ for HTTP requests
- `Symfony2 EventDispatcher <http://symfony.com/doc/master/components/event_dispatcher/introduction.html>`_ for events
- `Monolog <https://github.com/seldaek/monolog>`_ and `Psr\\Log <https://github.com/php-fig/log>`_ for logging
- `Doctrine <https://github.com/doctrine/common>`_ for caching
You can `download the packaged Phar <http://pear.amazonwebservices.com/get/aws.phar>`_ and simply include it in your
scripts to get started::
require '/path/to/aws.phar';
If you have `phing <http://www.phing.info/>`_ installed, you can clone the SDK and build a phar file yourself using the
*"phar"* task.
.. note::
If you are using PHP with the Suhosin patch (especially common on Ubuntu and Debian distributions), you may need
to enable the use of phars in the ``suhosin.ini``. Without this, including a phar file in your code will cause it to
silently fail. You should modify the ``suhosin.ini`` file by adding the line:
``suhosin.executor.include.whitelist = phar``
Installing via Zip
------------------
Each release of the AWS SDK for PHP (since 2.3.2) ships with a zip file containing all of the classes and dependencies
you need to run the SDK in a `PSR-0 <https://github.com/php-fig/fig-standards/blob/master/accepted/PSR-0.md>`_
compatible directory structure. Additionally, the zip file includes a class autoloader for the AWS SDK for PHP and the
following required and suggested libraries:
- `Guzzle <https://github.com/guzzle/guzzle>`_ for HTTP requests
- `Symfony2 EventDispatcher <http://symfony.com/doc/master/components/event_dispatcher/introduction.html>`_ for events
- `Monolog <https://github.com/seldaek/monolog>`_ and `Psr\\Log <https://github.com/php-fig/log>`_ for logging
- `Doctrine <https://github.com/doctrine/common>`_ for caching
Using the zip file is great if you:
1. Prefer not to or cannot use package managers like Composer and PEAR.
2. Cannot use phar files due to environment limitations.
3. Want to use only specific files from the SDK.
To get started, you must `download the zip file <http://pear.amazonwebservices.com/get/aws.zip>`_, unzip it into your
project to a location of your choosing, and include the autoloader::
require '/path/to/aws-autoloader.php';
Alternatively, you can write your own autoloader or use an existing one from your project.
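As a minimal sketch of such an autoloader (the extraction path is hypothetical; the zip uses a PSR-0 compatible layout):
.. code-block:: php
spl_autoload_register(function ($class) {
// Map a PSR-0 class name to a file under the unzipped SDK directory
$file = '/path/to/aws-sdk/' . str_replace(array('\\', '_'), '/', $class) . '.php';
if (is_file($file)) {
require $file;
}
});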
If you have `phing <http://www.phing.info/>`_ installed, you can clone the SDK and build a zip file yourself using the
*"zip"* task.
Installing via PEAR
~~~~~~~~~~~~~~~~~~~
`PEAR <http://pear.php.net/>`_ packages are easy to install, and are available in your PHP environment path so that they
are accessible to any PHP project. PEAR packages are not specific to your project, but rather to the machine they're
installed on.
From the command-line, you can install the SDK with PEAR as follows (this might need to be run as ``sudo``):
.. code-block:: sh
pear config-set auto_discover 1
pear channel-discover pear.amazonwebservices.com
pear install aws/sdk
Alternatively, you can combine all three of the preceding statements into one by doing the following:
.. code-block:: sh
pear -D auto_discover=1 install pear.amazonwebservices.com/sdk
Once the SDK has been installed via PEAR, you can include the `phar <http://php.net/manual/en/book.phar.php>`_ into
your project with:
.. code-block:: php
require 'AWSSDKforPHP/aws.phar';

190
vendor/aws/aws-sdk-php/docs/make.bat vendored Normal file
View File

@ -0,0 +1,190 @@
@ECHO OFF
REM Command file for Sphinx documentation
if "%SPHINXBUILD%" == "" (
set SPHINXBUILD=sphinx-build
)
set BUILDDIR=_build
set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
set I18NSPHINXOPTS=%SPHINXOPTS% .
if NOT "%PAPER%" == "" (
set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
)
if "%1" == "" goto help
if "%1" == "help" (
:help
echo.Please use `make ^<target^>` where ^<target^> is one of
echo. html to make standalone HTML files
echo. dirhtml to make HTML files named index.html in directories
echo. singlehtml to make a single large HTML file
echo. pickle to make pickle files
echo. json to make JSON files
echo. htmlhelp to make HTML files and a HTML help project
echo. qthelp to make HTML files and a qthelp project
echo. devhelp to make HTML files and a Devhelp project
echo. epub to make an epub
echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter
echo. text to make text files
echo. man to make manual pages
echo. texinfo to make Texinfo files
echo. gettext to make PO message catalogs
echo. changes to make an overview over all changed/added/deprecated items
echo. linkcheck to check all external links for integrity
echo. doctest to run all doctests embedded in the documentation if enabled
goto end
)
if "%1" == "clean" (
for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
del /q /s %BUILDDIR%\*
goto end
)
if "%1" == "html" (
%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The HTML pages are in %BUILDDIR%/html.
goto end
)
if "%1" == "dirhtml" (
%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
goto end
)
if "%1" == "singlehtml" (
%SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
goto end
)
if "%1" == "pickle" (
%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
if errorlevel 1 exit /b 1
echo.
echo.Build finished; now you can process the pickle files.
goto end
)
if "%1" == "json" (
%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
if errorlevel 1 exit /b 1
echo.
echo.Build finished; now you can process the JSON files.
goto end
)
if "%1" == "htmlhelp" (
%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
if errorlevel 1 exit /b 1
echo.
echo.Build finished; now you can run HTML Help Workshop with the ^
.hhp project file in %BUILDDIR%/htmlhelp.
goto end
)
if "%1" == "qthelp" (
%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
if errorlevel 1 exit /b 1
echo.
echo.Build finished; now you can run "qcollectiongenerator" with the ^
.qhcp project file in %BUILDDIR%/qthelp, like this:
echo.^> qcollectiongenerator %BUILDDIR%\qthelp\AWSSDKforPHP.qhcp
echo.To view the help file:
echo.^> assistant -collectionFile %BUILDDIR%\qthelp\AWSSDKforPHP.qhc
goto end
)
if "%1" == "devhelp" (
%SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
if errorlevel 1 exit /b 1
echo.
echo.Build finished.
goto end
)
if "%1" == "epub" (
%SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The epub file is in %BUILDDIR%/epub.
goto end
)
if "%1" == "latex" (
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
if errorlevel 1 exit /b 1
echo.
echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
goto end
)
if "%1" == "text" (
%SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The text files are in %BUILDDIR%/text.
goto end
)
if "%1" == "man" (
%SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The manual pages are in %BUILDDIR%/man.
goto end
)
if "%1" == "texinfo" (
%SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.
goto end
)
if "%1" == "gettext" (
%SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The message catalogs are in %BUILDDIR%/locale.
goto end
)
if "%1" == "changes" (
%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
if errorlevel 1 exit /b 1
echo.
echo.The overview file is in %BUILDDIR%/changes.
goto end
)
if "%1" == "linkcheck" (
%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
if errorlevel 1 exit /b 1
echo.
echo.Link check complete; look for any errors in the above output ^
or in %BUILDDIR%/linkcheck/output.txt.
goto end
)
if "%1" == "doctest" (
%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
if errorlevel 1 exit /b 1
echo.
echo.Testing of doctests in the sources finished, look at the ^
results in %BUILDDIR%/doctest/output.txt.
goto end
)
:end

View File

@ -0,0 +1,480 @@
===============
Migration Guide
===============
This guide shows how to migrate your code to use the new AWS SDK for PHP and how the new SDK differs from the
AWS SDK for PHP - Version 1.
Introduction
------------
The PHP language and community have evolved significantly over the past few years. Since the inception of the AWS SDK
for PHP, PHP has gone through two major version changes (`versions 5.3 and 5.4 <http://php.net/downloads.php#v5>`_) and
many in the PHP community have unified behind the recommendations of the `PHP Framework Interop Group
<http://php-fig.org>`_. Consequently, we decided to make breaking changes to the SDK in order to align with the more
modern patterns used in the PHP community.
For the new release, we rewrote the SDK from the ground up to address popular customer requests. The new SDK is built on
top of the `Guzzle HTTP client framework <http://guzzlephp.org>`_, which provides increased performance and enables
event-driven customization. We also introduced high-level abstractions to make programming common tasks easy. The SDK
is compatible with PHP 5.3.3 and newer, and follows the PSR-0 standard for namespaces and autoloading.
Which Services are Supported?
-----------------------------
The AWS SDK for PHP supports all of the AWS services supported by Version 1 of the SDK and more, including Amazon
Route 53, Amazon Glacier, and AWS Direct Connect. See the `AWS SDK for PHP website <http://aws.amazon.com/sdkforphp/>`_
for the full list of services supported by the SDK. Be sure to watch or star our `AWS SDK for PHP GitHub repository
<https://github.com/aws/aws-sdk-php>`_ to stay up-to-date with the latest changes.
What's New?
-----------
- `PHP 5.3 namespaces <http://php.net/namespaces>`_
- Follows `PSR-0, PSR-1, and PSR-2 standards <http://php-fig.org>`_
- Built on `Guzzle <http://guzzlephp.org>`_ and utilizes the Guzzle feature set
- Persistent connection management for both serial and parallel requests
- Event hooks (via `Symfony2 EventDispatcher
<http://symfony.com/doc/2.0/components/event_dispatcher/introduction.html>`_) for event-driven, custom behavior
- Request and response entity bodies are stored in ``php://temp`` streams to reduce memory usage
- Transient networking and cURL failures are automatically retried using truncated exponential backoff
- Plug-ins for over-the-wire logging and response caching
- "Waiter" objects that allow you to poll a resource until it is in a desired state
- Resource iterator objects for easily iterating over paginated responses
- Service-specific sets of exceptions
- Modeled responses with a simpler interface
- Grouped constants (Enums) for service parameter options
- Flexible request batching system
- Service builder/container that supports easy configuration and dependency injection
- Full unit test suite with extensive code coverage
- `Composer <http://getcomposer.org>`_ support (including PSR-0 compliance) for installing and autoloading SDK
dependencies
- `Phing <http://phing.info>`_ ``build.xml`` for installing dev tools, driving testing, and producing ``.phar`` files
- Fast Amazon DynamoDB batch PutItem and DeleteItem system
- Multipart upload system for Amazon Simple Storage Service (Amazon S3) and Amazon Glacier that can be paused and
resumed
- Redesigned DynamoDB Session Handler with smarter writing and garbage collection
- Improved multi-region support
What's Different?
-----------------
Architecture
~~~~~~~~~~~~
The new SDK is built on top of `Guzzle <http://guzzlephp.org>`_ and inherits its features and
conventions. Every AWS service client extends the Guzzle client, defining operations through a service description
file. The SDK has a much more robust and flexible object-oriented architecture, including the use of design patterns,
event dispatching and dependency injection. As a result, many of the classes and methods from the previous SDK have
been changed.
Project Dependencies
~~~~~~~~~~~~~~~~~~~~
Unlike Version 1 of the SDK, the new SDK does not pre-package all of its dependencies
in the repository. Dependencies are best resolved and autoloaded via `Composer <http://getcomposer.org>`_. However,
when installing the SDK via the downloadable phar, the dependencies are resolved for you.
Namespaces
~~~~~~~~~~
The SDK's directory structure and namespaces are organized according to `PSR-0 standards
<https://github.com/php-fig/fig-standards/blob/master/accepted/PSR-0.md>`_, making the SDK inherently modular. The
``Aws\Common`` namespace contains the core code of the SDK, and each service client is contained in its own separate
namespace (e.g., ``Aws\DynamoDb``).
Coding Standards
~~~~~~~~~~~~~~~~
The SDK adopts the PSR standards produced by the PHP Framework Interop Group. An immediately
noticeable change is that all method names are now named using lower camel-case
(e.g., ``putObject`` instead of ``put_object``).
Required Regions
~~~~~~~~~~~~~~~~
The `region <http://docs.aws.amazon.com/general/latest/gr/rande.html>`_ must be provided to instantiate a client
(except in the case where the service has a single endpoint like Amazon CloudFront). The AWS region you select may
affect both your performance and costs.
Client Factories
~~~~~~~~~~~~~~~~
Factory methods instantiate service clients and do the work of setting up the signature,
exponential backoff settings, exception handler, and so forth. At a minimum you must provide your access key, secret
key, and region to the client factory, but there are many other settings you can use to customize the client
behavior.
.. code-block:: php
$dynamodb = Aws\DynamoDb\DynamoDbClient::factory(array(
'key' => 'your-aws-access-key-id',
'secret' => 'your-aws-secret-access-key',
'region' => 'us-west-2',
));
Configuration
~~~~~~~~~~~~~
A global configuration file can be used to inject credentials into clients
automatically via the service builder. The service builder acts as a dependency injection container for the service
clients. (**Note:** The SDK does not automatically attempt to load the configuration file like in Version 1 of the
SDK.)
.. code-block:: php
$aws = Aws\Common\Aws::factory('/path/to/custom/config.php');
$s3 = $aws->get('s3');
This technique is the preferred way for instantiating service clients. Your ``config.php`` might look similar to the
following:
.. code-block:: php
<?php
return array(
'includes' => array('_aws'),
'services' => array(
'default_settings' => array(
'params' => array(
'key' => 'your-aws-access-key-id',
'secret' => 'your-aws-secret-access-key',
'region' => 'us-west-2'
)
)
)
);
The line that says ``'includes' => array('_aws')`` includes the default configuration file packaged with the SDK. This
sets up all of the service clients for you so you can retrieve them by name with the ``get()`` method of the service
builder.
Service Operations
~~~~~~~~~~~~~~~~~~
Executing operations in the new SDK is similar to how it was in the previous SDK, with two
main differences. First, operations follow the lower camel-case naming convention. Second, a single array parameter is
used to pass in all of the operation options. The following examples show the Amazon S3 ``PutObject`` operation
performed in each SDK:
.. code-block:: php
// Previous SDK - PutObject operation
$s3->create_object('bucket-name', 'object-key.txt', array(
'body' => 'lorem ipsum'
));
.. code-block:: php
// New SDK - PutObject operation
$result = $s3->putObject(array(
'Bucket' => 'bucket-name',
'Key' => 'object-key.txt',
'Body' => 'lorem ipsum'
));
In the new SDK, the ``putObject()`` method doesn't actually exist as a method on the client. It is implemented using
the ``__call()`` magic method of the client and acts as a shortcut to instantiate a command, execute the command,
and retrieve the result.
A ``Command`` object encapsulates the request and response of the call to AWS. From the ``Command`` object, you can
call the ``getResult()`` method (as in the preceding example) to retrieve the parsed result, or you can call the
``getResponse()`` method to retrieve data about the response (e.g., the status code or the raw response).
The ``Command`` object can also be useful when you want to manipulate the command before execution or need to execute
several commands in parallel. The following is an example of the same ``PutObject`` operation using the command
syntax:
.. code-block:: php
$command = $s3->getCommand('PutObject', array(
'Bucket' => 'bucket-name',
'Key' => 'object-key.txt',
'Body' => 'lorem ipsum'
));
$result = $command->getResult();
Or you can use the chainable ``set()`` method on the ``Command`` object:
.. code-block:: php
$result = $s3->getCommand('PutObject')
->set('Bucket', 'bucket-name')
->set('Key', 'object-key.txt')
->set('Body', 'lorem ipsum')
->getResult();
Responses
~~~~~~~~~
The format of responses has changed. Responses are no longer instances of the ``CFResponse`` object.
The ``Command`` object (as seen in the preceding section) of the new SDK encapsulates the request and response, and is
the object from which to retrieve the results.
.. code-block:: php
// Previous SDK
// Execute the operation and get the CFResponse object
$response = $s3->list_buckets();
// Get the parsed response body as a SimpleXMLElement
$result = $response->body;
// New SDK
// Executes the operation and gets the response in an array-like object
$result = $s3->listBuckets();
The new syntax is similar, but a few fundamental differences exist between responses in the previous SDK and this
version:
The new SDK represents parsed responses (i.e., the results) as Guzzle ``Model`` objects instead of ``CFSimpleXML``
objects as in the prior version. These Model objects are easy to work with since they act like arrays. They also
have helpful built-in features such as mapping and filtering. The content of the results will also look different
in this version of the SDK. The SDK marshals responses into the models and then transforms them into more convenient
structures based on the service description. The API documentation details the response of all operations.
Exceptions
~~~~~~~~~~
The new SDK uses exceptions to communicate errors and bad responses.
Instead of relying on the ``CFResponse::isOK()`` method of the previous SDK to determine if an operation is
successful, the new SDK throws exceptions when the operation is *not* successful. Therefore, you can assume success
if there was no exception thrown, but you will need to add ``try...catch`` logic to your application code in order to
handle potential errors. The following is an example of how to handle the response of an Amazon DynamoDB
``DescribeTable`` call in the new SDK:
.. code-block:: php
$tableName = 'my-table';
try {
$result = $dynamoDb->describeTable(array('TableName' => $tableName));
printf('The provisioned throughput for table "%s" is %d RCUs and %d WCUs.',
$tableName,
$result->getPath('Table/ProvisionedThroughput/ReadCapacityUnits'),
$result->getPath('Table/ProvisionedThroughput/WriteCapacityUnits')
);
} catch (Aws\DynamoDb\Exception\DynamoDbException $e) {
echo "Error describing table {$tableName}";
}
You can get the Guzzle response object back from the command. This is helpful if you need to retrieve the status
code, additional data from the headers, or the raw response body.
.. code-block:: php
$command = $dynamoDb->getCommand('DescribeTable', array('TableName' => $tableName));
$statusCode = $command->getResponse()->getStatusCode();
You can also get the response object and status code from the exception if one is thrown.
.. code-block:: php
try {
$command = $dynamoDb->getCommand('DescribeTable', array(
'TableName' => $tableName
));
$statusCode = $command->getResponse()->getStatusCode();
} catch (Aws\DynamoDb\Exception\DynamoDbException $e) {
$statusCode = $e->getResponse()->getStatusCode();
}
Iterators
~~~~~~~~~
The SDK provides iterator classes that make it easier to traverse results from list and describe type
operations. Instead of having to code solutions that perform multiple requests in a loop and keep track of tokens or
markers, the iterator classes do that for you. You can simply foreach over the iterator:
.. code-block:: php
$objects = $s3->getIterator('ListObjects', array(
'Bucket' => 'my-bucket-name'
));
foreach ($objects as $object) {
echo $object['Key'] . PHP_EOL;
}
Comparing Code Samples from Both SDKs
-------------------------------------
Example 1 - Amazon S3 ListParts Operation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
From Version 1 of the SDK
^^^^^^^^^^^^^^^^^^^^^^^^^
.. code-block:: php
<?php
require '/path/to/sdk.class.php';
require '/path/to/config.inc.php';
$s3 = new AmazonS3();
$response = $s3->list_parts('my-bucket-name', 'my-object-key', 'my-upload-id', array(
'max-parts' => 10
));
if ($response->isOK())
{
// Loop through and display the part numbers
foreach ($response->body->Part as $part) {
echo "{$part->PartNumber}\n";
}
}
else
{
echo "Error during S3 ListParts operation.\n";
}
From Version 2 of the SDK
^^^^^^^^^^^^^^^^^^^^^^^^^
.. code-block:: php
<?php
require '/path/to/vendor/autoload.php';
use Aws\Common\Aws;
use Aws\S3\Exception\S3Exception;
$aws = Aws::factory('/path/to/config.php');
$s3 = $aws->get('s3');
try {
$result = $s3->listParts(array(
'Bucket' => 'my-bucket-name',
'Key' => 'my-object-key',
'UploadId' => 'my-upload-id',
'MaxParts' => 10
));
// Loop through and display the part numbers
foreach ($result['Parts'] as $part) {
echo "{$part['PartNumber']}\n";
}
} catch (S3Exception $e) {
echo "Error during S3 ListParts operation.\n";
}
Example 2 - Amazon DynamoDB Scan Operation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
From Version 1 of the SDK
^^^^^^^^^^^^^^^^^^^^^^^^^
.. code-block:: php
<?php
require '/path/to/sdk.class.php';
require '/path/to/config.inc.php';
$dynamo_db = new AmazonDynamoDB();
$start_key = null;
$people = array();
// Perform as many Scan operations as needed to acquire all the names of people
// that are 16 or older
do
{
// Setup the parameters for the DynamoDB Scan operation
$params = array(
'TableName' => 'people',
'AttributesToGet' => array('id', 'age', 'name'),
'ScanFilter' => array(
'age' => array(
'ComparisonOperator' =>
AmazonDynamoDB::CONDITION_GREATER_THAN_OR_EQUAL,
'AttributeValueList' => array(
array(AmazonDynamoDB::TYPE_NUMBER => '16')
)
),
)
);
// Add the exclusive start key parameter if needed
if ($start_key)
{
$params['ExclusiveStartKey'] = array(
'HashKeyElement' => array(
AmazonDynamoDB::TYPE_STRING => $start_key
)
);
$start_key = null;
}
// Perform the Scan operation and get the response
$response = $dynamo_db->scan($params);
// If the response succeeded, get the results
if ($response->isOK())
{
foreach ($response->body->Items as $item)
{
$people[] = (string) $item->name->{AmazonDynamoDB::TYPE_STRING};
}
// Get the last evaluated key if it is provided
if ($response->body->LastEvaluatedKey)
{
$start_key = (string) $response->body
->LastEvaluatedKey
->HashKeyElement
->{AmazonDynamoDB::TYPE_STRING};
}
}
else
{
// Throw an exception if the response was not OK (200-level)
throw new DynamoDB_Exception('DynamoDB Scan operation failed.');
}
}
while ($start_key);
print_r($people);
From Version 2 of the SDK
^^^^^^^^^^^^^^^^^^^^^^^^^
.. code-block:: php
<?php
require '/path/to/vendor/autoload.php';
use Aws\Common\Aws;
use Aws\DynamoDb\Enum\ComparisonOperator;
use Aws\DynamoDb\Enum\Type;
$aws = Aws::factory('/path/to/config.php');
$dynamodb = $aws->get('dynamodb');
// Create a ScanIterator and setup the parameters for the DynamoDB Scan operation
$scan = $dynamodb->getIterator('Scan', array(
'TableName' => 'people',
'AttributesToGet' => array('id', 'age', 'name'),
'ScanFilter' => array(
'age' => array(
'ComparisonOperator' => ComparisonOperator::GE,
'AttributeValueList' => array(
array(Type::NUMBER => '16')
)
),
)
));
// Perform as many Scan operations as needed to acquire all the names of people
// that are 16 or older
$people = array();
foreach ($scan as $item) {
$people[] = $item['name'][Type::STRING];
}
print_r($people);

View File

@ -0,0 +1,287 @@
=================
Performance Guide
=================
The AWS SDK for PHP is able to send HTTP requests to various web services with minimal overhead. This document serves
as a guide that will help you to achieve optimal performance with the SDK.
.. contents::
:depth: 1
:local:
:class: inline-toc
Upgrade PHP
-----------
Using an up-to-date version of PHP will generally improve the performance of your PHP applications. Did you know that
PHP 5.4 is `20-40% faster <http://news.php.net/php.internals/57760>`_ than PHP 5.3?
`Upgrading to PHP 5.4 <http://www.php.net/manual/en/migration54.php>`_ or greater will provide better performance and
lower memory usage. If you cannot upgrade from PHP 5.3 to PHP 5.4 or PHP 5.5, upgrading to PHP 5.3.18 or greater will
improve performance over older versions of PHP 5.3.
You can install PHP 5.4 on an Amazon Linux AMI using the following command.
.. code-block:: bash
yum install php54
Use PHP 5.5 or an opcode cache like APC
---------------------------------------
To improve the overall performance of your PHP environment, it is highly recommended that you use an opcode cache
such as the OPCache built into PHP 5.5, APC, XCache, or WinCache. By default, PHP must load a file from disk, parse
the PHP code into opcodes, and finally execute the opcodes. Installing an opcode cache allows the parsed opcodes to
be cached in memory so that you do not need to parse the script on every web server request, and in ideal
circumstances, these opcodes can be served directly from memory.
We have taken great care to ensure that the SDK will perform well in an environment that utilizes an opcode cache.
.. note::
PHP 5.5 comes with an opcode cache that is installed and enabled by default:
http://php.net/manual/en/book.opcache.php
If you are using PHP 5.5, then you may skip the remainder of this section.
APC
~~~
If you are not able to run PHP 5.5, then we recommend using APC as an opcode cache.
Installing on Amazon Linux
^^^^^^^^^^^^^^^^^^^^^^^^^^
When using Amazon Linux, you can install APC using one of the following commands, depending on whether you are using
PHP 5.3 or PHP 5.4.
.. code-block:: bash
# For PHP 5.4
yum install php54-pecl-apc
# For PHP 5.3
yum install php-pecl-apc
Modifying APC settings
^^^^^^^^^^^^^^^^^^^^^^
APC configuration settings can be set and configured in the ``apc.ini`` file of most systems. You can find more
information about configuring APC in the PHP.net `APC documentation <http://www.php.net/manual/en/apc.configuration.php>`_.
The APC configuration file is located at ``/etc/php.d/apc.ini`` on Amazon Linux.
.. code-block:: bash
# You can only modify the file as sudo
sudo vim /etc/php.d/apc.ini
apc.shm_size=128M
^^^^^^^^^^^^^^^^^
It is recommended that you set the `apc.shm_size <http://www.php.net/manual/en/apc.configuration.php#ini.apc.shm-size>`_
setting to be 128M or higher. You should investigate what the right value will be for your application. The ideal
value will depend on how many files your application includes, what other frameworks are used by your application, and
if you are caching data in the APC user cache.
You can run the following command on Amazon Linux to set apc.shm_size to 128M::
sed -i "s/apc.shm_size=.*/apc.shm_size=128M/g" /etc/php.d/apc.ini
apc.stat=0
^^^^^^^^^^
The SDK adheres to PSR-0 and relies heavily on class autoloading. When ``apc.stat=1``, APC will perform a stat on
each cached entry to ensure that the file has not been updated since it was cached in APC. This incurs a system call for
every autoloaded class required by a PHP script (you can see this for yourself by running ``strace`` on your
application).
You can tell APC not to stat each cached file by setting ``apc.stat=0`` in your ``apc.ini`` file. This change will generally
improve the overall performance of APC, but it will require you to explicitly clear the APC cache when a cached file
should be updated. This can be accomplished with Apache by issuing a hard or graceful restart. This restart step could
be added as part of the deployment process of your application.
You can run the following command on Amazon Linux to set apc.stat to 0::
sed -i "s/apc.stat=1/apc.stat=0/g" /etc/php.d/apc.ini
.. admonition:: From the `PHP documentation <http://www.php.net/manual/en/apc.configuration.php#ini.apc.stat>`_
This defaults to on, forcing APC to stat (check) the script on each request to determine if it has been modified. If
it has been modified it will recompile and cache the new version. If this setting is off, APC will not check, which
usually means that to force APC to recheck files, the web server will have to be restarted or the cache will have to
be manually cleared. Note that FastCGI web server configurations may not clear the cache on restart. On a production
server where the script files rarely change, a significant performance boost can be achieved by disabling stats.
For included/required files this option applies as well, but note that for relative path includes (any path that
doesn't start with / on Unix) APC has to check in order to uniquely identify the file. If you use absolute path
includes APC can skip the stat and use that absolute path as the unique identifier for the file.
Use Composer with a classmap autoloader
---------------------------------------
Using `Composer <http://getcomposer.org>`_ is the recommended way to install the AWS SDK for PHP. Composer is a
dependency manager for PHP that can be used to pull in all of the dependencies of the SDK and generate an autoloader.
Autoloaders are used to lazily load classes as they are required by a PHP script. Composer will generate an autoloader
that is able to autoload the PHP scripts of your application and all of the PHP scripts of the vendors required by your
application (i.e. the AWS SDK for PHP). When running in production, it is highly recommended that you use a classmap
autoloader to improve the autoloader's speed. You can generate a classmap autoloader by passing the ``-o`` or
``--optimize-autoloader`` option to Composer's `install command <http://getcomposer.org/doc/03-cli.md#install>`_::
php composer.phar install --optimize-autoloader
Please consult the :doc:`installation` guide for more information on how to install the SDK using Composer.
Uninstall Xdebug
----------------
`Xdebug <http://xdebug.org/>`_ is an amazing tool that can be used to identify performance bottlenecks. However, if
performance is critical to your application, do not install the Xdebug extension on your production environment. Simply
loading the extension will greatly slow down the SDK.
When running on Amazon Linux, Xdebug can be removed with the following command:
.. code-block:: bash
# PHP 5.4
yum remove php54-pecl-xdebug
# PHP 5.3
yum remove php-pecl-xdebug
Install PECL uri_template
-------------------------
The SDK utilizes URI templates to power each operation. In order to be compatible out of the box with the majority
of PHP environments, the default URI template expansion implementation is written in PHP.
`PECL URI_Template <https://github.com/ioseb/uri-template>`_ is a URI template extension for PHP written in C. This C
implementation is about 3 times faster than the default PHP implementation for expanding URI templates. Your
application will automatically begin utilizing the PECL uri_template extension after it is installed.
.. code-block:: bash
pecl install uri_template-alpha
Turn off parameter validation
-----------------------------
The SDK utilizes service descriptions to tell the client how to serialize an HTTP request and parse an HTTP response
into a Model object. Along with serialization information, service descriptions are used to validate operation inputs
client-side before sending a request. Disabling parameter validation is a micro-optimization, but this setting can
typically be disabled in production by setting the ``validation`` option in a client factory method to ``false``.
.. code-block:: php
$client = Aws\DynamoDb\DynamoDbClient::factory(array(
'region' => 'us-west-2',
'validation' => false
));
Cache instance profile credentials
----------------------------------
When you do not provide credentials to the SDK and do not have credentials defined in your environment variables, the
SDK will attempt to utilize IAM instance profile credentials by contacting the Amazon EC2 instance metadata service
(IMDS), which requires an HTTP request to retrieve the credentials.
You can cache these instance profile credentials in memory until they expire and avoid the cost of sending an HTTP
request to the IMDS each time the SDK is utilized. Set the ``credentials.cache`` option to ``true`` to attempt to
utilize the `Doctrine Cache <https://github.com/doctrine/cache>`_ PHP library to cache credentials with APC.
.. code-block:: php
$client = Aws\DynamoDb\DynamoDbClient::factory(array(
'region' => 'us-west-2',
'credentials.cache' => true
));
.. note::
You will need to install Doctrine Cache in order for the SDK to cache credentials when setting
``credentials.cache`` to ``true``. You can add doctrine/cache to your composer.json dependencies by adding it to your
project's ``require`` section::
{
"require": {
"aws/aws-sdk-php": "2.*",
"doctrine/cache": "1.*"
}
}
Check if you are being throttled
--------------------------------
You can check to see if you are being throttled by enabling the exponential backoff logger option. You can set the
``client.backoff.logger`` option to ``debug`` when in development, but we recommend that you provide a
``Guzzle\Log\LogAdapterInterface`` object when running in production.
.. code-block:: php
$client = Aws\DynamoDb\DynamoDbClient::factory(array(
'region' => 'us-west-2',
'client.backoff.logger' => 'debug'
));
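For production, the following sketch wires in a ``Guzzle\Log\LogAdapterInterface`` using Monolog (this assumes the
optional monolog/monolog package is installed; the log file path is hypothetical):
.. code-block:: php
use Guzzle\Log\MonologLogAdapter;
use Monolog\Logger;
use Monolog\Handler\StreamHandler;
// Send backoff/retry events to a log file instead of echoing them
$logger = new Logger('backoff');
$logger->pushHandler(new StreamHandler('/var/log/aws-backoff.log'));
$client = Aws\DynamoDb\DynamoDbClient::factory(array(
'region' => 'us-west-2',
'client.backoff.logger' => new MonologLogAdapter($logger)
));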
When using Amazon DynamoDB, you can monitor your tables for throttling using
`Amazon CloudWatch <http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/MonitoringDynamoDB.html#CloudwatchConsole_DynamoDB>`_.
Preload frequently included files
---------------------------------
The AWS SDK for PHP adheres to PSR-0 and heavily utilizes class autoloading. Each class is in a separate file and
is included lazily as it is required. Enabling an opcode cache like APC, setting ``apc.stat=0``, and utilizing an
optimized Composer autoloader will help to mitigate the performance cost of autoloading the classes needed to utilize
the SDK. In situations like hosting a webpage where you are loading the same classes over and over, you can shave off a
bit more time by compiling all of the autoloaded classes into a single file thereby completely eliminating the cost of
autoloading. This technique can not only speed up the use of the SDK for specific use cases (e.g. using the
Amazon DynamoDB session handler), but can also speed up other aspects of your application. Even with ``apc.stat=0``,
preloading classes that you know will be used in your application will be slightly faster than relying on autoloading.
You can easily generate a compiled autoloader file using the
`ClassPreloader <https://github.com/mtdowling/ClassPreloader>`_ project. View the project's README for information on
creating a "preloader" for use with the AWS SDK for PHP.
Profile your code to find performance bottlenecks
-------------------------------------------------
You will need to profile your application to determine the bottlenecks. This can be done using
`Xdebug <http://xdebug.org/>`_, `XHProf <https://github.com/facebook/xhprof>`_,
`strace <http://en.wikipedia.org/wiki/Strace>`_, and various other tools. There are many resources available on the
internet to help you track down performance problems with your application. Here are a few that we have found useful:
* http://talks.php.net/show/devconf/0
* http://talks.php.net/show/perf_tunning/16
Comparing SDK1 and SDK2
-----------------------
Software performance is very subjective and depends heavily on factors outside of the control of the SDK. The
AWS SDK for PHP is tuned to cover the broadest set of performance sensitive applications using AWS. While there may
be a few isolated cases where V1 of the SDK is as fast or faster than V2, that is not generally true and comes
with the loss of extensibility, maintainability, persistent HTTP connections, response parsing, PSR compliance, etc.
Depending on your use case, you will find that a properly configured environment running the AWS SDK for PHP is
generally just as fast as SDK1 for sending a single request and more than 350% faster than SDK1 for sending many
requests.
Comparing batch requests
~~~~~~~~~~~~~~~~~~~~~~~~
A common misconception when comparing the performance of SDK1 and SDK2 is that SDK1 is faster than SDK2 when sending
requests using the "batch()" API.
SDK1 is generally *not* faster at sending requests in parallel than SDK2. There may be some cases where SDK1 will appear
to more quickly complete the process of sending multiple requests in parallel, but SDK1 does not retry throttled
requests when using the ``batch()`` API. In SDK2, throttled requests are automatically retried in parallel using
truncated exponential backoff. Automatically retrying failed requests will help to ensure that your application is
successfully completing the requests that you think it is.
You can always disable retries if your use case does not benefit from retrying failed requests. To disable retries,
set the ``client.backoff`` option to ``false`` when creating a client.
.. code-block:: php
$client = Aws\DynamoDb\DynamoDbClient::factory(array(
'region' => 'us-west-2',
'client.backoff' => false
));

View File

@ -0,0 +1,226 @@
===========
Quick Start
===========
Including the SDK
-----------------
No matter which :doc:`installation method <installation>` you are using, the SDK can be included into your project or
script with a single include (or require) statement. Please refer to the following table for the code that best fits
your installation method. Please replace any instances of ``/path/to/`` with the actual path on your system.
========================== =============================================================================================
Installation Method Include Statement
========================== =============================================================================================
Using Composer ``require '/path/to/vendor/autoload.php';``
-------------------------- ---------------------------------------------------------------------------------------------
Using the Phar ``require '/path/to/aws.phar';``
-------------------------- ---------------------------------------------------------------------------------------------
Using the Zip ``require '/path/to/aws-autoloader.php';``
-------------------------- ---------------------------------------------------------------------------------------------
Using PEAR ``require 'AWSSDKforPHP/aws.phar';``
========================== =============================================================================================
For the remainder of this guide, we will show examples that use the Composer installation method. If you are using a
different installation method, then you can refer to this section and substitute in the proper code.
Creating a client
-----------------
You can quickly get up and running by using a web service client's factory method to instantiate clients as needed.
.. code-block:: php
<?php
// Include the SDK using the Composer autoloader
require 'vendor/autoload.php';
use Aws\S3\S3Client;
// Instantiate the S3 client with your AWS credentials and desired AWS region
$client = S3Client::factory(array(
'key' => 'your-aws-access-key-id',
'secret' => 'your-aws-secret-access-key',
));
**Note:** Instantiating a client without providing credentials causes the client to attempt to retrieve `IAM Instance
Profile credentials
<http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UsingIAM.html#UsingIAMrolesWithAmazonEC2Instances>`_.
Instance Profile Credentials are not supported by every service. `Please check if the service you are using supports
temporary credentials <http://docs.aws.amazon.com/STS/latest/UsingSTS/UsingTokens.html>`_.
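For example, on an Amazon EC2 instance that has an IAM role attached, a client can be created without any explicit
credentials (a sketch; the SDK then retrieves and refreshes instance profile credentials for you):
.. code-block:: php
use Aws\S3\S3Client;
// No key or secret provided; instance profile credentials are fetched automatically
$client = S3Client::factory();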
Commands
--------
You can then invoke service operations on the client by calling the operation name and providing an associative array
of parameters. Service operation methods like Amazon S3's ``createBucket()`` don't actually exist on a client. These
methods are implemented using the ``__call()`` magic method of a client. These magic methods are derived from a Guzzle
`service description <http://guzzlephp.org/guide/service/service_descriptions.html>`_ present in the
client's namespace in the ``Resources`` directory. You can use the `API documentation
<http://docs.aws.amazon.com/aws-sdk-php/latest/>`_ or directly view the service description to see what
operations are available, what parameters can be set for an operation, what values are provided in the response model,
and what exceptions are thrown by calling the operation.
.. code-block:: php
$bucket = 'my-bucket';
$result = $client->createBucket(array(
'Bucket' => $bucket
));
// Wait until the bucket is created
$client->waitUntil('BucketExists', array('Bucket' => $bucket));
.. _qs-executing-commands:
Executing commands
~~~~~~~~~~~~~~~~~~
Commands can be executed in two ways: using the shorthand syntax via the ``__call()`` magic methods (as shown in the
preceding example) or using the expanded syntax via the ``getCommand()`` method of the client object.
.. code-block:: php
// The shorthand syntax (via __call())
$result = $client->createBucket(array(/* ... */));
// The expanded syntax (via getCommand())
$command = $client->getCommand('CreateBucket', array(/* ... */));
$result = $command->getResult();
When using the expanded syntax, a ``Command`` object is returned from ``getCommand()``, which encapsulates the request
and response of the HTTP request to AWS. From the ``Command`` object, you can call the ``getResult()`` method or the
``execute()`` method to execute the command and get the parsed result. Additionally, you can call the ``getRequest()``
and ``getResponse()`` methods (after the command has been executed) to get information about the request and response,
respectively (e.g., the status code or the raw response, headers sent in the request, etc.).
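For instance, a brief sketch of inspecting the response after executing a command:
.. code-block:: php
$command = $client->getCommand('CreateBucket', array('Bucket' => 'my-bucket'));
$result = $command->execute();
// After execution, the underlying HTTP response is available
$statusCode = $command->getResponse()->getStatusCode();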
The ``Command`` object also supports a chainable syntax and can also be useful when you want to manipulate the request
before execution.
.. code-block:: php
$result = $client->getCommand('ListObjects')
->set('MaxKeys', 50)
->set('Prefix', 'foo/baz/')
->getResult();
It also allows for executing multiple commands in parallel.
.. code-block:: php
$ops = array();
$ops[] = $client->getCommand('GetObject', array('Bucket' => 'foo', 'Key' => 'Bar'));
$ops[] = $client->getCommand('GetObject', array('Bucket' => 'foo', 'Key' => 'Baz'));
$client->execute($ops);
Response models
~~~~~~~~~~~~~~~
.. include:: _snippets/models-intro.txt
To learn more about how to work with models, please read the detailed guide to :doc:`feature-models`.
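As a brief illustration (a sketch; the available keys depend on each operation's response model), results act like
arrays and nested values can be traversed with ``getPath()``:
.. code-block:: php
$result = $client->listBuckets();
// Model objects support array-like access
foreach ($result['Buckets'] as $bucket) {
echo $bucket['Name'] . PHP_EOL;
}
// Nested values can be pulled out with a path expression
echo $result->getPath('Owner/DisplayName');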
Using the service builder
-------------------------
When using the SDK, you have the option to use individual factory methods for each client or the ``Aws\Common\Aws``
class to build your clients. The ``Aws\Common\Aws`` class is a service builder and dependency injection container for
the SDK and is the recommended way for instantiating clients. The service builder allows you to share configuration
options between multiple services and pre-wires short service names with the appropriate client class.
The following example shows how to use the service builder to retrieve a ``Aws\DynamoDb\DynamoDbClient`` and perform the
``GetItem`` operation using the command syntax.
Passing an associative array of parameters as the first or second argument of ``Aws\Common\Aws::factory()`` treats the
parameters as shared across all clients generated by the builder. In the example, we tell the service builder to use the
same credentials for every client.
.. code-block:: php
<?php
require 'vendor/autoload.php';
use Aws\Common\Aws;
use Aws\DynamoDb\Exception\DynamoDbException;
// Create a service builder using shared credentials for each service
$aws = Aws::factory(array(
'key' => 'your-aws-access-key-id',
'secret' => 'your-aws-secret-access-key',
'region' => 'us-west-2'
));
// Retrieve the DynamoDB client by its short name from the service builder
$client = $aws->get('dynamodb');
// Get an item from the "posts" table
try {
$result = $client->getItem(array(
'TableName' => 'posts',
'Key' => $client->formatAttributes(array(
'HashKeyElement' => 'using-dynamodb-with-the-php-sdk'
)),
'ConsistentRead' => true
));
print_r($result['Item']);
} catch (DynamoDbException $e) {
echo 'The item could not be retrieved.';
}
Error handling
--------------
An exception is thrown when an error is encountered. Be sure to use try/catch blocks when implementing error handling
logic in your applications. The SDK throws service specific exceptions when a server-side error occurs.
.. code-block:: php
use Aws\Common\Aws;
use Aws\S3\Exception\BucketAlreadyExistsException;
$aws = Aws::factory('/path/to/my_config.json');
$s3 = $aws->get('s3');
try {
$s3->createBucket(array('Bucket' => 'my-bucket'));
} catch (BucketAlreadyExistsException $e) {
echo 'That bucket already exists! ' . $e->getMessage() . "\n";
}
The HTTP response to the ``createBucket()`` method will receive a ``409 Conflict`` response with a
``BucketAlreadyExists`` error code. When the SDK sees the error code it will attempt to throw a named exception that
matches the name of the HTTP response error code. You can see a full list of supported exceptions for each client by
looking in the ``Exception/`` directory of a client namespace. For example, ``src/Aws/S3/Exception`` contains many different
exception classes::
.
├── AccessDeniedException.php
├── AccountProblemException.php
├── AmbiguousGrantByEmailAddressException.php
├── BadDigestException.php
├── BucketAlreadyExistsException.php
├── BucketAlreadyOwnedByYouException.php
├── BucketNotEmptyException.php
[...]
Waiters
-------
.. include:: _snippets/waiters-intro.txt
To learn more about how to use and configure waiters, please read the detailed guide to :doc:`feature-waiters`.
Iterators
---------
.. include:: _snippets/iterators-intro.txt
To learn more about how to use and configure iterators, please read the detailed guide to :doc:`feature-iterators`.

View File

@ -0,0 +1,39 @@
============
Requirements
============
Aside from a baseline understanding of object-oriented programming in PHP (including PHP 5.3 namespaces), there are a
few minimum system requirements to start using the AWS SDK for PHP. The extensions listed are common and are
installed with PHP 5.3 by default in most environments.
Minimum requirements
--------------------
* PHP 5.3.3+ compiled with the cURL extension
* A recent version of cURL 7.16.2+ compiled with OpenSSL and zlib
.. note::
To work with Amazon CloudFront private distributions, you must have the OpenSSL PHP extension to sign private
CloudFront URLs.
.. _optimal-settings:
Optimal settings
----------------
Please consult the :doc:`performance` for a list of recommendations and optimal settings that can be made to
ensure that you are using the SDK as efficiently as possible.
Compatibility test
------------------
Run the `compatibility-test.php` file in the SDK to quickly check if your system is capable of running the SDK. In
addition to meeting the minimum system requirements of the SDK, the compatibility test checks for optional settings and
makes recommendations that can help you to improve the performance of the SDK. The compatibility test can output text
for the command line or a web browser. When running in a browser, successful checks appear in green, warnings in
purple, and failures in red. When running from the CLI, the result of a check will appear on each line.
When reporting an issue with the SDK, it is often helpful to share information about your system. Supplying the output
of the compatibility test in forum posts or GitHub issues can help to streamline the process of identifying the root
cause of an issue.

View File

@ -0,0 +1,3 @@
rst2pdf
Sphinx>=1.2b1
guzzle_sphinx_theme>=0.3.0

View File

@ -0,0 +1,3 @@
.. service:: AutoScaling
.. include:: _snippets/incomplete.txt

View File

@ -0,0 +1,3 @@
.. service:: CloudFormation
.. include:: _snippets/incomplete.txt

View File

@ -0,0 +1,132 @@
.. service:: CloudFront
Signing CloudFront URLs for Private Distributions
-------------------------------------------------
Signed URLs allow you to provide users access to your private content. A signed URL includes additional information
(e.g., expiration time) that gives you more control over access to your content. This additional information appears in
a policy statement, which is based on either a canned policy or a custom policy. For information about how to set up
private distributions and why you need to sign URLs, please read the `Serving Private Content through CloudFront section
<http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PrivateContent.html>`_ of the CloudFront Developer
Guide.
.. note::
You must have the OpenSSL extension installed in your PHP environment in order to sign CloudFront URLs.
You can sign a URL using the CloudFront client in the SDK. First you must make sure to provide your CloudFront
Private Key and Key Pair ID to the CloudFront client.
.. code-block:: php
<?php
$cloudFront = CloudFrontClient::factory(array(
'private_key' => '/path/to/your/cloudfront-private-key.pem',
'key_pair_id' => '<cloudfront key pair id>',
));
You can alternatively specify the Private Key and Key Pair ID in your AWS config file and use the service builder to
instantiate the CloudFront client. The following is an example config file that specifies the CloudFront key information.
.. code-block:: php
<?php return array(
'includes' => array('_aws'),
'services' => array(
'default_settings' => array(
'params' => array(
'key' => '<aws access key>',
'secret' => '<aws secret key>',
'region' => 'us-west-2'
)
),
'cloudfront' => array(
'extends' => 'cloudfront',
'params' => array(
'private_key' => '/path/to/your/cloudfront-private-key.pem',
'key_pair_id' => '<cloudfront key pair id>'
)
)
)
);
You can sign a CloudFront URL for a video resource using either a canned or custom policy.
.. code-block:: php
// Setup parameter values for the resource
$streamHostUrl = 'rtmp://example-distribution.cloudfront.net';
$resourceKey = 'videos/example.mp4';
$expires = time() + 300;
// Create a signed URL for the resource using the canned policy
$signedUrlCannedPolicy = $cloudFront->getSignedUrl(array(
'url' => $streamHostUrl . '/' . $resourceKey,
'expires' => $expires,
));
For versions of the SDK later than 2.3.1, instead of providing your private key information when you instantiate the
client, you can provide it at the time when you sign the URL.
.. code-block:: php
$signedUrlCannedPolicy = $cloudFront->getSignedUrl(array(
'url' => $streamHostUrl . '/' . $resourceKey,
'expires' => $expires,
'private_key' => '/path/to/your/cloudfront-private-key.pem',
'key_pair_id' => '<cloudfront key pair id>'
));
To use a custom policy, provide the ``policy`` key instead of ``expires``.
.. code-block:: php
$customPolicy = <<<POLICY
{
"Statement": [
{
"Resource": "{$resourceKey}",
"Condition": {
"IpAddress": {"AWS:SourceIp": "{$_SERVER['REMOTE_ADDR']}/32"},
"DateLessThan": {"AWS:EpochTime": {$expires}}
}
}
]
}
POLICY;
// Create a signed URL for the resource using a custom policy
$signedUrlCustomPolicy = $cloudFront->getSignedUrl(array(
'url' => $streamHostUrl . '/' . $resourceKey,
'policy' => $customPolicy,
));
The form of the signed URL differs depending on whether the URL you are signing uses the "http" or "rtmp"
scheme. For "http", the full, absolute URL is returned. For "rtmp", only the relative URL is returned for
your convenience, because some players require the host and path to be provided as separate parameters.
The following is an example of how you could use the signed URL to construct a web page displaying a video using
`JWPlayer <http://www.longtailvideo.com/jw-player/>`_. The same type of technique would apply to other players like
`FlowPlayer <http://flowplayer.org/>`_, but would require different client-side code.
.. code-block:: html
<html>
<head>
<title>Amazon CloudFront Streaming Example</title>
<script type="text/javascript" src="https://example.com/jwplayer.js"></script>
</head>
<body>
<div id="video">The canned policy video will be here.</div>
<script type="text/javascript">
jwplayer('video').setup({
file: "<?= $streamHostUrl ?>/cfx/st/<?= $signedUrlCannedPolicy ?>",
width: "720",
height: "480"
});
</script>
</body>
</html>
.. include:: _snippets/incomplete.txt

View File

@ -0,0 +1,3 @@
.. service:: CloudSearch
.. include:: _snippets/incomplete.txt

View File

@ -0,0 +1,3 @@
.. service:: CloudTrail
.. include:: _snippets/incomplete.txt

View File

@ -0,0 +1,3 @@
.. service:: CloudWatch
.. include:: _snippets/incomplete.txt

View File

@ -0,0 +1,3 @@
.. service:: DataPipeline
.. include:: _snippets/incomplete.txt

View File

@ -0,0 +1,3 @@
.. service:: DirectConnect
.. include:: _snippets/incomplete.txt

View File

@ -0,0 +1,160 @@
.. service:: DynamoDb 2011-12-05
Creating tables
---------------
You must first create a table that can be used to store items. Even though Amazon DynamoDB tables do not use a fixed
schema, you do need to create a schema for the table's keys. This is explained in greater detail in Amazon DynamoDB's
`Data Model documentation <http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html>`_. You
will also need to specify the amount of `provisioned throughput
<http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html>`_ that should
be made available to the table.
.. example:: DynamoDb/Integration/DynamoDb_20111205_Test.php testCreateTable
The table will now have a status of ``CREATING`` while the table is being provisioned. You can use a waiter to poll the
table until it becomes ``ACTIVE``.
.. example:: DynamoDb/Integration/DynamoDb_20111205_Test.php testWaitUntilTableExists
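The referenced test files are not reproduced here. As a rough sketch, assuming an *errors* table keyed by a numeric
``id`` hash key and ``time`` range key (illustrative names), the two calls might look like:

.. code-block:: php

    // Create the table using the 2011-12-05 key schema format
    $client->createTable(array(
        'TableName' => 'errors',
        'KeySchema' => array(
            'HashKeyElement'  => array('AttributeName' => 'id',   'AttributeType' => 'N'),
            'RangeKeyElement' => array('AttributeName' => 'time', 'AttributeType' => 'N'),
        ),
        'ProvisionedThroughput' => array(
            'ReadCapacityUnits'  => 10,
            'WriteCapacityUnits' => 20,
        ),
    ));

    // Poll the table until its status is ACTIVE
    $client->waitUntilTableExists(array('TableName' => 'errors'));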
A full list of the parameters available to the ``createTable()`` operation can be found in the
`API documentation <http://docs.aws.amazon.com/aws-sdk-php/latest/class-Aws.DynamoDb.DynamoDbClient.html#_createTable>`_.
Updating a table
----------------
You can also update the table after it's been created using the `updateTable() <http://docs.aws.amazon.com/aws-sdk-php/latest/class-Aws.DynamoDb.DynamoDbClient.html#_updateTable>`_ method. This allows you to do things
like increase or decrease your provisioned throughput capacity.
.. example:: DynamoDb/Integration/DynamoDb_20111205_Test.php testUpdateTable
Describing a table
------------------
Now that the table is created, you can use the
`describeTable() <http://docs.aws.amazon.com/aws-sdk-php/latest/class-Aws.DynamoDb.DynamoDbClient.html#_describeTable>`_
method to get information about the table.
.. example:: DynamoDb/Integration/DynamoDb_20111205_Test.php testDescribeTable
The return value of the ``describeTable()`` method is a ``Guzzle\Service\Resource\Model`` object that can be used like
an array. For example, you could retrieve the number of items in a table or the amount of provisioned read throughput.
Listing tables
--------------
You can retrieve a list of all of the tables associated with a specific endpoint using the
`listTables() <http://docs.aws.amazon.com/aws-sdk-php/latest/class-Aws.DynamoDb.DynamoDbClient.html#_listTables>`_
method. Each Amazon DynamoDB endpoint is entirely independent. For example, if you have two tables called "MyTable," one
in US-EAST-1 and one in US-WEST-2, they are completely independent and do not share any data. The ListTables operation
returns all of the table names associated with the account making the request, for the endpoint that receives the
request.
.. example:: DynamoDb/Integration/DynamoDb_20111205_Test.php testListTables
Iterating over all tables
~~~~~~~~~~~~~~~~~~~~~~~~~
The result of a ``listTables()`` operation might be truncated. Because of this, it is usually better to use an iterator
to retrieve a complete list of all of the tables owned by your account in a specific region. The iterator will
automatically handle sending any necessary subsequent requests.
.. example:: DynamoDb/Integration/DynamoDb_20111205_Test.php testListTablesWithIterator
.. tip::
You can convert an iterator to an array using the ``toArray()`` method of the iterator.
Adding items
------------
You can add an item to our *errors* table using the
`putItem() <http://docs.aws.amazon.com/aws-sdk-php/latest/class-Aws.DynamoDb.DynamoDbClient.html#_putItem>`_
method of the client.
.. example:: DynamoDb/Integration/DynamoDb_20111205_Test.php testAddItem
As you can see, the ``formatAttributes()`` method of the client can be used to more easily format the attributes of the
item. Alternatively, you can provide the item attributes without using the helper method:
.. example:: DynamoDb/Integration/DynamoDb_20111205_Test.php testAddItemWithoutHelperMethod
Retrieving items
----------------
You can check if the item was added correctly using the
`getItem() <http://docs.aws.amazon.com/aws-sdk-php/latest/class-Aws.DynamoDb.DynamoDbClient.html#_getItem>`_
method of the client. Because Amazon DynamoDB works under an 'eventual consistency' model, we need to specify that we
are performing a `consistent read
<http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/APISummary.html#DataReadConsistency>`_ operation.
.. example:: DynamoDb/Integration/DynamoDb_20111205_Test.php testGetItem
You can also retrieve items in batches of up to 100 using the `BatchGetItem()
<http://docs.aws.amazon.com/aws-sdk-php/latest/class-Aws.DynamoDb.DynamoDbClient.html#_batchGetItem>`_ method.
.. example:: DynamoDb/Integration/DynamoDb_20111205_Test.php testBatchGetItem
Query and scan
--------------
Once data is in an Amazon DynamoDB table, you have two APIs for searching the data:
`Query and Scan <http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html>`_.
Query
~~~~~
A query operation searches only primary key attribute values and supports a subset of comparison operators on key
attribute values to refine the search process. A query returns all of the item data for the matching primary keys
(all of each item's attributes) up to 1MB of data per query operation.
Let's say we want a list of all "1201" errors that occurred in the last 15 minutes. We could issue a single query
that will search by the primary key of the table and retrieve up to 1MB of the items. However, a better approach is to
use the query iterator to retrieve the entire list of all items matching the query.
.. example:: DynamoDb/Integration/DynamoDb_20111205_Test.php testQuery
Scan
~~~~
A scan operation scans the entire table. You can specify filters that are applied to the results after the complete
scan to refine the values returned to you. Amazon DynamoDB puts a 1MB limit on the scan (the limit applies before
the results are filtered).
A scan can be useful for more complex searches. For example, we can retrieve all of the errors in the last 15
minutes that contain the word "overflow":
.. example:: DynamoDb/Integration/DynamoDb_20111205_Test.php testScan
Deleting a table
----------------
.. warning::
Deleting a table will also permanently delete all of its contents.
Now that you've taken a quick tour of the PHP client for Amazon DynamoDB, you will want to clean up by deleting the
resources you created.
.. example:: DynamoDb/Integration/DynamoDb_20111205_Test.php testDeleteTable
Using the WriteRequestBatch
---------------------------
You can use the ``WriteRequestBatch`` if you need to write or delete many items as quickly as possible. The
WriteRequestBatch provides a high level of performance because it converts what would normally be a separate HTTP
request for each operation into HTTP requests containing up to 25 comparable requests per transaction.
If you have a large array of items you wish to add to your table, you can iterate over them, adding each item to the
batch object. After all the items are added, call ``flush()``. The batch object will automatically flush the batch and
write items to Amazon DynamoDB after hitting a customizable threshold. A final call to the batch object's ``flush()``
method is necessary to transfer any remaining items in the queue.
.. example:: DynamoDb/Integration/WriteRequestBatch_20111205_Test.php testWriteRequestBatchForPuts
You can also use the ``WriteRequestBatch`` object to delete items in batches.
.. example:: DynamoDb/Integration/WriteRequestBatch_20111205_Test.php testWriteRequestBatchForDeletes
The ``WriteRequestBatch``, ``PutRequest``, and ``DeleteRequest`` classes are all a part of the
``Aws\DynamoDb\Model\BatchRequest`` namespace.

View File

@ -0,0 +1,212 @@
.. service:: DynamoDb
Creating tables
---------------
You must first create a table that can be used to store items. Even though Amazon DynamoDB tables do not use a fixed
schema, you do need to create a schema for the table's keys. This is explained in greater detail in Amazon DynamoDB's
`Data Model documentation <http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html>`_. You
will also need to specify the amount of `provisioned throughput
<http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html>`_ that should
be made available to the table.
.. example:: DynamoDb/Integration/DynamoDb_20120810_Test.php testCreateTable
The table will now have a status of ``CREATING`` while the table is being provisioned. You can use a waiter to poll the
table until it becomes ``ACTIVE``.
.. example:: DynamoDb/Integration/DynamoDb_20120810_Test.php testWaitUntilTableExists
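As a rough sketch, assuming the same illustrative *errors* table keyed by a numeric ``id`` hash key and ``time``
range key, creating the table and waiting for it under the 2012-08-10 API might look like:

.. code-block:: php

    $client->createTable(array(
        'TableName' => 'errors',
        'AttributeDefinitions' => array(
            array('AttributeName' => 'id',   'AttributeType' => 'N'),
            array('AttributeName' => 'time', 'AttributeType' => 'N'),
        ),
        'KeySchema' => array(
            array('AttributeName' => 'id',   'KeyType' => 'HASH'),
            array('AttributeName' => 'time', 'KeyType' => 'RANGE'),
        ),
        'ProvisionedThroughput' => array(
            'ReadCapacityUnits'  => 10,
            'WriteCapacityUnits' => 20,
        ),
    ));

    // Poll the table until its status is ACTIVE
    $client->waitUntilTableExists(array('TableName' => 'errors'));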
A full list of the parameters available to the ``createTable()`` operation can be found in the `API documentation
<http://docs.aws.amazon.com/aws-sdk-php/latest/class-Aws.DynamoDb.DynamoDbClient.html#_createTable>`_. For more
information about using Local Secondary Indexes, please see the :ref:`dynamodb-lsi` section of this guide.
Updating a table
----------------
You can also update the table after it's been created using the `updateTable() <http://docs.aws.amazon.com/aws-sdk-php/latest/class-Aws.DynamoDb.DynamoDbClient.html#_updateTable>`_ method. This allows you to do things
like increase or decrease your provisioned throughput capacity.
.. example:: DynamoDb/Integration/DynamoDb_20120810_Test.php testUpdateTable
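For instance, raising the provisioned throughput of the illustrative *errors* table might look like this sketch:

.. code-block:: php

    $client->updateTable(array(
        'TableName' => 'errors',
        'ProvisionedThroughput' => array(
            'ReadCapacityUnits'  => 15,
            'WriteCapacityUnits' => 25,
        ),
    ));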
Describing a table
------------------
Now that the table is created, you can use the
`describeTable() <http://docs.aws.amazon.com/aws-sdk-php/latest/class-Aws.DynamoDb.DynamoDbClient.html#_describeTable>`_
method to get information about the table.
.. example:: DynamoDb/Integration/DynamoDb_20120810_Test.php testDescribeTable
The return value of the ``describeTable()`` method is a ``Guzzle\Service\Resource\Model`` object that can be used like
an array. For example, you could retrieve the number of items in a table or the amount of provisioned read throughput.
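A sketch of pulling a couple of values out of the returned model (the paths follow the standard ``DescribeTable``
response shape):

.. code-block:: php

    $result = $client->describeTable(array('TableName' => 'errors'));

    // getPath() retrieves nested values from the model
    echo $result->getPath('Table/ItemCount') . "\n";
    echo $result->getPath('Table/ProvisionedThroughput/ReadCapacityUnits') . "\n";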
Listing tables
--------------
You can retrieve a list of all of the tables associated with a specific endpoint using the
`listTables() <http://docs.aws.amazon.com/aws-sdk-php/latest/class-Aws.DynamoDb.DynamoDbClient.html#_listTables>`_
method. Each Amazon DynamoDB endpoint is entirely independent. For example, if you have two tables called "MyTable," one
in US-EAST-1 and one in US-WEST-2, they are completely independent and do not share any data. The ListTables operation
returns all of the table names associated with the account making the request, for the endpoint that receives the
request.
.. example:: DynamoDb/Integration/DynamoDb_20120810_Test.php testListTables
Iterating over all tables
~~~~~~~~~~~~~~~~~~~~~~~~~
The result of a ``listTables()`` operation might be truncated. Because of this, it is usually better to use an iterator
to retrieve a complete list of all of the tables owned by your account in a specific region. The iterator will
automatically handle sending any necessary subsequent requests.
.. example:: DynamoDb/Integration/DynamoDb_20120810_Test.php testListTablesWithIterator
.. tip::
You can convert an iterator to an array using the ``toArray()`` method of the iterator.
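A minimal sketch of iterating over every table name in the current region:

.. code-block:: php

    $iterator = $client->getIterator('ListTables');
    foreach ($iterator as $tableName) {
        echo $tableName . "\n";
    }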
Adding items
------------
You can add an item to our *errors* table using the
`putItem() <http://docs.aws.amazon.com/aws-sdk-php/latest/class-Aws.DynamoDb.DynamoDbClient.html#_putItem>`_
method of the client.
.. example:: DynamoDb/Integration/DynamoDb_20120810_Test.php testAddItem
As you can see, the ``formatAttributes()`` method of the client can be used to more easily format the attributes of the
item. Alternatively, you can provide the item attributes without using the helper method:
.. example:: DynamoDb/Integration/DynamoDb_20120810_Test.php testAddItemWithoutHelperMethod
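A sketch of both styles, using illustrative attributes for the *errors* table:

.. code-block:: php

    // Using the formatAttributes() helper
    $client->putItem(array(
        'TableName' => 'errors',
        'Item' => $client->formatAttributes(array(
            'id'    => 1201,
            'time'  => time(),
            'error' => 'Executive overflow',
        )),
    ));

    // Specifying the attribute types explicitly
    $client->putItem(array(
        'TableName' => 'errors',
        'Item' => array(
            'id'    => array('N' => '1201'),
            'time'  => array('N' => (string) time()),
            'error' => array('S' => 'Executive overflow'),
        ),
    ));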
You can also add items in batches of up to 25 items using the `BatchWriteItem()
<http://docs.aws.amazon.com/aws-sdk-php/latest/class-Aws.DynamoDb.DynamoDbClient.html#_batchWriteItem>`_
method. Please see the example as shown in the :ref:`dynamodb-lsi` section of this guide.
There is also a higher-level abstraction in the SDK over the ``BatchWriteItem`` operation called the
``WriteRequestBatch`` that handles queuing of write requests and retrying of unprocessed items. Please see the
:ref:`dynamodb-wrb` section of this guide for more information.
Retrieving items
----------------
You can check if the item was added correctly using the
`getItem() <http://docs.aws.amazon.com/aws-sdk-php/latest/class-Aws.DynamoDb.DynamoDbClient.html#_getItem>`_
method of the client. Because Amazon DynamoDB works under an 'eventual consistency' model, we need to specify that we
are performing a `consistent read
<http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/APISummary.html#DataReadConsistency>`_ operation.
.. example:: DynamoDb/Integration/DynamoDb_20120810_Test.php testGetItem
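A sketch of a consistent read of a single item (the key values are illustrative and must match the values used when
the item was put):

.. code-block:: php

    $result = $client->getItem(array(
        'TableName' => 'errors',
        'Key' => array(
            'id'   => array('N' => '1201'),
            'time' => array('N' => '1378193000'),
        ),
        'ConsistentRead' => true,
    ));

    echo $result->getPath('Item/error/S');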
You can also retrieve items in batches of up to 100 using the `BatchGetItem()
<http://docs.aws.amazon.com/aws-sdk-php/latest/class-Aws.DynamoDb.DynamoDbClient.html#_batchGetItem>`_ method.
.. example:: DynamoDb/Integration/DynamoDb_20120810_Test.php testBatchGetItem
Query and scan
--------------
Once data is in an Amazon DynamoDB table, you have two APIs for searching the data:
`Query and Scan <http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html>`_.
Query
~~~~~
A query operation searches only primary key attribute values and supports a subset of comparison operators on key
attribute values to refine the search process. A query returns all of the item data for the matching primary keys
(all of each item's attributes) up to 1MB of data per query operation.
Let's say we want a list of all "1201" errors that occurred in the last 15 minutes. We could issue a single query
that will search by the primary key of the table and retrieve up to 1MB of the items. However, a better approach is to
use the query iterator to retrieve the entire list of all items matching the query.
.. example:: DynamoDb/Integration/DynamoDb_20120810_Test.php testQuery
Scan
~~~~
A scan operation scans the entire table. You can specify filters that are applied to the results after the complete
scan to refine the values returned to you. Amazon DynamoDB puts a 1MB limit on the scan (the limit applies before
the results are filtered).
A scan can be useful for more complex searches. For example, we can retrieve all of the errors in the last 15
minutes that contain the word "overflow":
.. example:: DynamoDb/Integration/DynamoDb_20120810_Test.php testScan
Deleting items
--------------
To delete an item you must use the `DeleteItem()
<http://docs.aws.amazon.com/aws-sdk-php/latest/class-Aws.DynamoDb.DynamoDbClient.html#_deleteItem>`_ method.
The following example scans through a table and deletes every item one by one.
.. example:: DynamoDb/Integration/DynamoDb_20120810_Test.php testDeleteItem
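A sketch of deleting a single item by its key (values illustrative):

.. code-block:: php

    $client->deleteItem(array(
        'TableName' => 'errors',
        'Key' => array(
            'id'   => array('N' => '1201'),
            'time' => array('N' => '1378193000'),
        ),
    ));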
You can also delete items in batches of up to 25 items using the `BatchWriteItem()
<http://docs.aws.amazon.com/aws-sdk-php/latest/class-Aws.DynamoDb.DynamoDbClient.html#_batchWriteItem>`_ method.
Deleting a table
----------------
.. warning::
Deleting a table will also permanently delete all of its contents.
Now that you've taken a quick tour of the PHP client for Amazon DynamoDB, you will want to clean up by deleting the
resources you created.
.. example:: DynamoDb/Integration/DynamoDb_20120810_Test.php testDeleteTable
.. _dynamodb-lsi:
Local secondary indexes
-----------------------
Local secondary indexes (LSI) pair your table's leading hash key with an alternate range key, in order to enable
specific queries to run more quickly than they would using a standard composite primary key. The following code samples
will show how to create an *Orders* table with a hash key of *CustomerId* and a range key of *OrderId*, but also include
a local secondary index on the *OrderDate* attribute so that searching the table by *OrderDate* can be done with a
``Query`` operation instead of a ``Scan`` operation.
First you must create the table with the local secondary index. Note that the attributes referenced in the key schema
for the table *and* the index must all be declared in the ``AttributeDefinitions`` parameter. When you create a local
secondary index, you can specify which attributes get "projected" into the index using the ``Projection`` parameter.
.. example:: DynamoDb/Integration/DynamoDb_20120810_Test.php testCreateTableWithLocalSecondaryIndexes
Next you must add some items to the table that you will be querying. Nothing in the ``BatchWriteItem`` operation
is specific to the LSI features, but since this operation is not demonstrated elsewhere in the guide, this is a good
place to show how to use it.
.. example:: DynamoDb/Integration/DynamoDb_20120810_Test.php testBatchWriteItem
When you query the table with an LSI, you must specify the name of the index using the ``IndexName`` parameter. The
attributes that are returned will depend on the value of the ``Select`` parameter and on what the table is projecting
to the index. In this case ``'Select' => 'COUNT'`` has been specified, so only the count of the items will be returned.
.. example:: DynamoDb/Integration/DynamoDb_20120810_Test.php testQueryWithLocalSecondaryIndexes
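A sketch of such a query, assuming the index was created under the hypothetical name ``OrderDateIndex``:

.. code-block:: php

    $result = $client->query(array(
        'TableName' => 'Orders',
        'IndexName' => 'OrderDateIndex', // hypothetical index name
        'Select'    => 'COUNT',
        'KeyConditions' => array(
            'CustomerId' => array(
                'AttributeValueList' => array(array('N' => '1041')),
                'ComparisonOperator' => 'EQ',
            ),
            'OrderDate' => array(
                'AttributeValueList' => array(array('N' => '20130101')),
                'ComparisonOperator' => 'GE',
            ),
        ),
    ));

    echo $result['Count'];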
.. _dynamodb-wrb:
Using the WriteRequestBatch
---------------------------
You can use the ``WriteRequestBatch`` if you need to write or delete many items as quickly as possible. The
WriteRequestBatch provides a high level of performance because it converts what would normally be a separate HTTP
request for each operation into HTTP requests containing up to 25 comparable requests per transaction.
If you have a large array of items you wish to add to your table, you can iterate over them, adding each item to the
batch object. After all the items are added, call ``flush()``. The batch object will automatically flush the batch and
write items to Amazon DynamoDB after hitting a customizable threshold. A final call to the batch object's ``flush()``
method is necessary to transfer any remaining items in the queue.
.. example:: DynamoDb/Integration/WriteRequestBatch_20120810_Test.php testWriteRequestBatchForPuts
You can also use the ``WriteRequestBatch`` object to delete items in batches.
.. example:: DynamoDb/Integration/WriteRequestBatch_20120810_Test.php testWriteRequestBatchForDeletes
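A sketch of queuing puts through a ``WriteRequestBatch`` (``$itemsToAdd`` is a hypothetical array of attribute
arrays):

.. code-block:: php

    use Aws\DynamoDb\Model\BatchRequest\WriteRequestBatch;
    use Aws\DynamoDb\Model\BatchRequest\PutRequest;
    use Aws\DynamoDb\Model\Item;

    $batch = WriteRequestBatch::factory($client);
    foreach ($itemsToAdd as $data) {
        $batch->add(new PutRequest(Item::fromArray($data), 'errors'));
    }

    // Transfer anything still sitting in the queue
    $batch->flush();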
The ``WriteRequestBatch``, ``PutRequest``, and ``DeleteRequest`` classes are all a part of the
``Aws\DynamoDb\Model\BatchRequest`` namespace.

View File

@ -0,0 +1,3 @@
.. service:: Ec2
.. include:: _snippets/incomplete.txt

View File

@ -0,0 +1,3 @@
.. service:: ElastiCache
.. include:: _snippets/incomplete.txt

View File

@ -0,0 +1,3 @@
.. service:: ElasticBeanstalk
.. include:: _snippets/incomplete.txt

View File

@ -0,0 +1,3 @@
.. service:: ElasticLoadBalancing
.. include:: _snippets/incomplete.txt

View File

@ -0,0 +1,3 @@
.. service:: ElasticTranscoder
.. include:: _snippets/incomplete.txt

View File

@ -0,0 +1,3 @@
.. service:: Emr
.. include:: _snippets/incomplete.txt

View File

@ -0,0 +1,3 @@
.. service:: Glacier
.. include:: _snippets/incomplete.txt

View File

@ -0,0 +1,3 @@
.. service:: Iam
.. include:: _snippets/incomplete.txt

View File

@ -0,0 +1,3 @@
.. service:: ImportExport
.. include:: _snippets/incomplete.txt

View File

@ -0,0 +1,3 @@
.. service:: OpsWorks
.. include:: _snippets/incomplete.txt

View File

@ -0,0 +1,3 @@
.. service:: Rds
.. include:: _snippets/incomplete.txt

View File

@ -0,0 +1,76 @@
.. service:: Redshift
Creating a cluster
------------------
The primary resource in Amazon Redshift is the cluster. To create a cluster you will use the ``CreateCluster``
operation. There are several parameters you can send when creating a cluster, so please refer to the API docs to
determine which parameters to use. The following is a basic example.
.. code-block:: php
$client->createCluster(array(
'ClusterIdentifier' => 'your-unique-cluster-id',
'ClusterType' => 'multi-node',
'MasterUsername' => 'yourusername',
'MasterUserPassword' => 'Y0urP@$$w0rd',
'NodeType' => 'dw.hs1.xlarge',
'NumberOfNodes' => 2,
));
After the ``CreateCluster`` operation is complete, the record for your cluster will exist, but it will still take some
time before the cluster is actually available for use. You can describe your cluster to check its status.
.. code-block:: php
$result = $client->describeClusters(array(
'ClusterIdentifier' => 'your-unique-cluster-id',
));
$clusters = $result->get('Clusters');
$status = $clusters[0]['ClusterStatus']; // 'Clusters' is a list of matching clusters
If you would like your code to wait until the cluster is available, you can use the ``ClusterAvailable`` waiter.
.. code-block:: php
$client->waitUntilClusterAvailable(array(
'ClusterIdentifier' => 'your-unique-cluster-id',
));
.. warning:: It can take over 20 minutes for a cluster to become available.
Creating snapshots
------------------
You can also take snapshots of your cluster with the ``CreateClusterSnapshot`` operation. Snapshots can take a little
time before they become available as well, so there is a corresponding ``SnapshotAvailable`` waiter.
.. code-block:: php
$client->createClusterSnapshot(array(
'ClusterIdentifier' => 'your-unique-cluster-id',
'SnapshotIdentifier' => 'your-unique-snapshot-id',
));
$client->waitUntilSnapshotAvailable(array(
'SnapshotIdentifier' => 'your-unique-snapshot-id',
));
Events
------
Amazon Redshift records events that take place with your clusters and account. These events are available for up to 14
days and can be retrieved via the ``DescribeEvents`` operation. Only 100 events can be returned at a time, so using the
SDK's iterators feature allows you to easily fetch and iterate over all the events in your query without having to
manually send repeated requests. The ``StartTime`` and ``EndTime`` parameters can take any PHP date string or DateTime
object.
.. code-block:: php
$events = $client->getIterator('DescribeEvents', array(
'StartTime' => strtotime('-3 days'),
'EndTime' => strtotime('now'),
));
foreach ($events as $event) {
echo "{$event['Date']}: {$event['Message']}\n";
}

View File

@ -0,0 +1,3 @@
.. service:: Route53
.. include:: _snippets/incomplete.txt

View File

@ -0,0 +1,716 @@
.. service:: S3
Creating a bucket
-----------------
Now that we've created a client object, let's create a bucket. This bucket will be used throughout the remainder of this
guide.
.. example:: S3/Integration/S3_20060301_Test.php testBucketAlreadyExists
If you run the above code example unaltered, you'll probably trigger the following exception::
PHP Fatal error: Uncaught Aws\S3\Exception\BucketAlreadyExistsException: AWS Error
Code: BucketAlreadyExists, Status Code: 409, AWS Request ID: D94E6394791E98A4,
AWS Error Type: client, AWS Error Message: The requested bucket name is not
available. The bucket namespace is shared by all users of the system. Please select
a different name and try again.
This is because bucket names in Amazon S3 reside in a global namespace. You'll need to change the actual name of the
bucket used in the examples of this tutorial in order for them to work correctly.
Creating a bucket in another region
-----------------------------------
The above example creates a bucket in the standard US-EAST-1 region. You can change the bucket location by passing a
``LocationConstraint`` value.
.. example:: S3/Integration/S3_20060301_Test.php testCreateBucketInRegion
You'll notice in the above example that we are using the ``Aws\Common\Enum\Region`` class to provide the ``US_WEST_2``
constant. The SDK provides various Enum classes under the ``Aws\Common\Enum`` namespace that can be useful for
remembering available values and ensuring you do not enter a typo.
.. note::
Using the enum classes is not required. You could just pass 'us-west-2' in the ``LocationConstraint`` key.
Waiting until the bucket exists
-------------------------------
Now that we've created a bucket, let's force our application to wait until the bucket exists. This can be done easily
using a *waiter*. The following snippet of code will poll the bucket until it exists or the maximum number of
polling attempts is reached.
.. example:: S3/Integration/S3_20060301_Test.php testWaitUntilBucketExists
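A minimal sketch of the create-and-wait pattern (remember to pick a globally unique bucket name):

.. code-block:: php

    $client->createBucket(array('Bucket' => 'mybucket'));

    // Poll the bucket until it exists
    $client->waitUntilBucketExists(array('Bucket' => 'mybucket'));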
Uploading objects
-----------------
Now that you've created a bucket, let's put some data in it. The following example creates an object in your bucket
called data.txt that contains 'Hello!'.
.. example:: S3/Integration/S3_20060301_Test.php testPutObject
The AWS SDK for PHP will attempt to automatically determine the most appropriate Content-Type header used to store the
object. If you are using a less common file extension and your Content-Type header is not added automatically, you can
add a Content-Type header by passing a ``ContentType`` option to the operation.
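A sketch of the upload described above, with an explicit ``ContentType``:

.. code-block:: php

    $client->putObject(array(
        'Bucket'      => 'mybucket',
        'Key'         => 'data.txt',
        'Body'        => 'Hello!',
        'ContentType' => 'text/plain',
    ));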
Uploading a file
~~~~~~~~~~~~~~~~
The above example uploaded text data to your object. You can alternatively upload the contents of a file by passing
the ``SourceFile`` option. Let's also put some metadata on the object.
.. example:: S3/Integration/S3_20060301_Test.php testPutObjectFromFile
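A sketch of uploading from a local file with metadata attached:

.. code-block:: php

    $client->putObject(array(
        'Bucket'     => 'mybucket',
        'Key'        => 'data_from_file.txt',
        'SourceFile' => '/path/to/data.txt',
        'Metadata'   => array('Foo' => 'abc', 'Baz' => '123'),
    ));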
Uploading from a stream
~~~~~~~~~~~~~~~~~~~~~~~
Alternatively, you can pass a resource returned from an ``fopen`` call to the ``Body`` parameter.
.. example:: S3/Integration/S3_20060301_Test.php testPutObjectFromStream
Because the AWS SDK for PHP is built around Guzzle, you can also pass an EntityBody object.
.. example:: S3/Integration/S3_20060301_Test.php testPutObjectFromEntityBody
Listing your buckets
--------------------
You can list all of the buckets owned by your account using the ``listBuckets`` method.
.. example:: S3/Integration/S3_20060301_Test.php testListBuckets
All service operation calls using the AWS SDK for PHP return a ``Guzzle\Service\Resource\Model`` object. This object
contains all of the data returned from the service in a normalized array like object. The object also contains a
``get()`` method used to retrieve values from the model by name, and a ``getPath()`` method that can be used to
retrieve nested values.
.. example:: S3/Integration/S3_20060301_Test.php testListBucketsWithGetPath
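A sketch of reading bucket names out of the returned model:

.. code-block:: php

    $result = $client->listBuckets();

    foreach ($result['Buckets'] as $bucket) {
        echo $bucket['Name'] . "\n";
    }

    // Or grab every name at once with a wildcard path
    $names = $result->getPath('Buckets/*/Name');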
Listing objects in your buckets
-------------------------------
Listing objects is a lot easier in the new SDK thanks to *iterators*. You can list all of the objects in a bucket using
the ``ListObjectsIterator``.
.. example:: S3/Integration/S3_20060301_Test.php testListObjectsWithIterator
Iterators will handle sending any required subsequent requests when a response is truncated. The ListObjects iterator
works with other parameters too.
.. code-block:: php
$iterator = $client->getIterator('ListObjects', array(
'Bucket' => $bucket,
'Prefix' => 'foo'
));
foreach ($iterator as $object) {
echo $object['Key'] . "\n";
}
You can convert any iterator to an array using the ``toArray()`` method of the iterator.
.. note::
Converting an iterator to an array will load the entire contents of the iterator into memory.
Downloading objects
-------------------
You can use the ``GetObject`` operation to download an object.
.. example:: S3/Integration/S3_20060301_Test.php testGetObject
The contents of the object are stored in the ``Body`` parameter of the model object. Other parameters are stored in
the model, including ``ContentType``, ``ContentLength``, ``VersionId``, ``ETag``, etc.
The ``Body`` parameter stores a reference to a ``Guzzle\Http\EntityBody`` object. The SDK will store the data in a
temporary PHP stream by default. This will work for most use-cases and will automatically protect your application from
attempting to download extremely large files into memory.
The EntityBody object has other nice features that allow you to read data using streams.
.. example:: S3/Integration/S3_20060301_Test.php testGetObjectUsingEntityBody
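A sketch of reading the body in chunks rather than loading it into memory all at once:

.. code-block:: php

    $result = $client->getObject(array('Bucket' => 'mybucket', 'Key' => 'data.txt'));
    $body = $result['Body']; // a Guzzle\Http\EntityBody object

    $body->rewind();
    while (!$body->feof()) {
        echo $body->read(1024);
    }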
Saving objects to a file
~~~~~~~~~~~~~~~~~~~~~~~~
You can save the contents of an object to a file by setting the SaveAs parameter.
.. example:: S3/Integration/S3_20060301_Test.php testGetObjectWithSaveAs
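A sketch of downloading straight to disk with ``SaveAs``:

.. code-block:: php

    $client->getObject(array(
        'Bucket' => 'mybucket',
        'Key'    => 'data.txt',
        'SaveAs' => '/tmp/data.txt',
    ));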
Uploading large files using multipart uploads
---------------------------------------------
Amazon S3 allows you to upload large files in pieces. The AWS SDK for PHP provides an abstraction layer that makes it
easier to upload large files using multipart upload.
.. code-block:: php
use Aws\Common\Enum\Size;
use Aws\Common\Exception\MultipartUploadException;
use Aws\S3\Model\MultipartUpload\UploadBuilder;
$uploader = UploadBuilder::newInstance()
->setClient($client)
->setSource('/path/to/large/file.mov')
->setBucket('mybucket')
->setKey('my-object-key')
->setOption('Metadata', array('Foo' => 'Bar'))
->setOption('CacheControl', 'max-age=3600')
->build();
// Perform the upload. Abort the upload if something goes wrong
try {
$uploader->upload();
echo "Upload complete.\n";
} catch (MultipartUploadException $e) {
$uploader->abort();
echo "Upload failed.\n";
}
You can attempt to upload parts in parallel by specifying the concurrency option on the UploadBuilder object. The
following example will create a transfer object that will attempt to upload three parts in parallel until the entire
object has been uploaded.
.. code-block:: php
$uploader = UploadBuilder::newInstance()
->setClient($client)
->setSource('/path/to/large/file.mov')
->setBucket('mybucket')
->setKey('my-object-key')
->setConcurrency(3)
->build();
You can use the ``Aws\S3\S3Client::upload()`` method if you just want to upload files and not worry if they are too
large to send in a single PutObject operation or require a multipart upload.
.. code-block:: php
$client->upload('bucket', 'key', 'object body', 'public-read');
Setting ACLs and Access Control Policies
----------------------------------------
You can specify a canned ACL on an object when uploading:
.. code-block:: php
$client->putObject(array(
'Bucket' => 'mybucket',
'Key' => 'data.txt',
'SourceFile' => '/path/to/data.txt',
'ACL' => 'public-read'
));
You can use the ``Aws\S3\Enum\CannedAcl`` object to provide canned ACL constants:
.. code-block:: php
use Aws\S3\Enum\CannedAcl;
$client->putObject(array(
'Bucket' => 'mybucket',
'Key' => 'data.txt',
'SourceFile' => '/path/to/data.txt',
'ACL' => CannedAcl::PUBLIC_READ
));
You can specify more complex ACLs using the ``ACP`` parameter when sending PutObject, CopyObject, CreateBucket,
CreateMultipartUpload, PutBucketAcl, PutObjectAcl, and other operations that accept a canned ACL. Using the ``ACP``
parameter allows you to specify more granular access control policies using an ``Aws\S3\Model\Acp`` object. The easiest
way to create an Acp object is through the ``Aws\S3\Model\AcpBuilder``.
.. code-block:: php
use Aws\S3\Enum\Permission;
use Aws\S3\Enum\Group;
use Aws\S3\Model\AcpBuilder;
$acp = AcpBuilder::newInstance()
->setOwner($myOwnerId)
->addGrantForEmail(Permission::READ, 'test@example.com')
->addGrantForUser(Permission::FULL_CONTROL, 'user-id')
->addGrantForGroup(Permission::READ, Group::AUTHENTICATED_USERS)
->build();
$client->putObject(array(
'Bucket' => 'mybucket',
'Key' => 'data.txt',
'SourceFile' => '/path/to/data.txt',
'ACP' => $acp
));
Creating a pre-signed URL
-------------------------
You can authenticate certain types of requests by passing the required information as query-string parameters instead
of using the Authorization HTTP header. This is useful for enabling direct third-party browser access to your private
Amazon S3 data, without proxying the request. The idea is to construct a "pre-signed" request and encode it as a URL
that an end-user's browser can retrieve. Additionally, you can limit a pre-signed request by specifying an expiration
time.
The most common scenario is creating a pre-signed URL to GET an object. The easiest way to do this is to use the
``getObjectUrl`` method of the Amazon S3 client. This same method can also be used to get an unsigned URL of a public
S3 object.
.. example:: S3/Integration/S3_20060301_Test.php testGetObjectUrl
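A sketch of both uses of ``getObjectUrl()``:

.. code-block:: php

    // Unsigned URL of a public object
    $url = $client->getObjectUrl('mybucket', 'data.txt');

    // Pre-signed URL that expires in 10 minutes
    $signedUrl = $client->getObjectUrl('mybucket', 'data.txt', '+10 minutes');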
You can also create pre-signed URLs for any Amazon S3 operation using the ``getCommand`` method for creating a Guzzle
command object and then calling the ``createPresignedUrl()`` method on the command.
.. example:: S3/Integration/S3_20060301_Test.php testCreatePresignedUrlFromCommand
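A sketch of the command-based approach:

.. code-block:: php

    $command = $client->getCommand('GetObject', array(
        'Bucket' => 'mybucket',
        'Key'    => 'data.txt',
    ));

    $signedUrl = $command->createPresignedUrl('+10 minutes');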
If you need more flexibility in creating your pre-signed URL, then you can create a pre-signed URL for a completely
custom ``Guzzle\Http\Message\RequestInterface`` object. You can use the ``get()``, ``post()``, ``head()``, ``put()``,
and ``delete()`` methods of a client object to easily create a Guzzle request object.
.. example:: S3/Integration/S3_20060301_Test.php testCreatePresignedUrl
Amazon S3 stream wrapper
------------------------
The Amazon S3 stream wrapper allows you to store and retrieve data from Amazon S3 using built-in PHP functions like
``file_get_contents``, ``fopen``, ``copy``, ``rename``, ``unlink``, ``mkdir``, ``rmdir``, etc.
You need to register the Amazon S3 stream wrapper in order to use it:
.. code-block:: php
// Register the stream wrapper from an S3Client object
$client->registerStreamWrapper();
This allows you to access buckets and objects stored in Amazon S3 using the ``s3://`` protocol. The "s3" stream wrapper
accepts strings that contain a bucket name followed by a forward slash and an optional object key or prefix:
``s3://<bucket>[/<key-or-prefix>]``.
Downloading data
~~~~~~~~~~~~~~~~
You can grab the contents of an object using ``file_get_contents``. Be careful with this function though; it loads the
entire contents of the object into memory.
.. code-block:: php
// Download the body of the "key" object in the "bucket" bucket
$data = file_get_contents('s3://bucket/key');
Use ``fopen()`` when working with larger files or if you need to stream data from Amazon S3.
.. code-block:: php
// Open a stream in read-only mode
if ($stream = fopen('s3://bucket/key', 'r')) {
// While the stream is still open
while (!feof($stream)) {
// Read 1024 bytes from the stream
echo fread($stream, 1024);
}
// Be sure to close the stream resource when you're done with it
fclose($stream);
}
Opening Seekable streams
^^^^^^^^^^^^^^^^^^^^^^^^
Streams opened in "r" mode only allow data to be read from the stream, and are not seekable by default. This is so that
data can be downloaded from Amazon S3 in a truly streaming manner where previously read bytes do not need to be
buffered into memory. If you need a stream to be seekable, you can pass ``seekable`` into the `stream context
options <http://www.php.net/manual/en/function.stream-context-create.php>`_ of a function.
.. code-block:: php
$context = stream_context_create(array(
's3' => array(
'seekable' => true
)
));
if ($stream = fopen('s3://bucket/key', 'r', false, $context)) {
// Read bytes from the stream
fread($stream, 1024);
// Seek back to the beginning of the stream
fseek($stream, 0);
// Read the same bytes that were previously read
fread($stream, 1024);
fclose($stream);
}
Opening seekable streams allows you to seek only to bytes that were previously read. You cannot skip ahead to bytes
that have not yet been read from the remote server. In order to allow previously read data to be recalled, data is
buffered in a PHP temp stream using Guzzle's
`CachingEntityBody <https://github.com/guzzle/guzzle/blob/master/src/Guzzle/Http/CachingEntityBody.php>`_ decorator.
When the amount of cached data exceeds 2MB, the data in the temp stream will transfer from memory to disk. Keep this in
mind when downloading large files from Amazon S3 using the ``seekable`` stream context setting.
Uploading data
~~~~~~~~~~~~~~
Data can be uploaded to Amazon S3 using ``file_put_contents()``.
.. code-block:: php
file_put_contents('s3://bucket/key', 'Hello!');
You can upload larger files by streaming data using ``fopen()`` and a "w", "x", or "a" stream access mode. The Amazon
S3 stream wrapper does **not** support simultaneous read and write streams (e.g. "r+", "w+", etc). This is because the
HTTP protocol does not allow simultaneous reading and writing.
.. code-block:: php
$stream = fopen('s3://bucket/key', 'w');
fwrite($stream, 'Hello!');
fclose($stream);
.. note::
Because Amazon S3 requires a Content-Length header to be specified before the payload of a request is sent, the
data to be uploaded in a PutObject operation is internally buffered using a PHP temp stream until the stream is
flushed or closed.
fopen modes
~~~~~~~~~~~
PHP's `fopen() <http://php.net/manual/en/function.fopen.php>`_ function requires that a ``$mode`` option is specified.
The mode option specifies whether or not data can be read or written to a stream and if the file must exist when
opening a stream. The Amazon S3 stream wrapper supports the following modes:
= ======================================================================================================================
r A read only stream where the file must already exist.
w A write only stream. If the file already exists it will be overwritten.
a A write only stream. If the file already exists, it will be downloaded to a temporary stream and any writes to
the stream will be appended to any previously uploaded data.
x A write only stream. An error is raised if the file already exists.
= ======================================================================================================================
Other object functions
~~~~~~~~~~~~~~~~~~~~~~
Stream wrappers allow many different built-in PHP functions to work with a custom system like Amazon S3. Here are some
of the functions that the Amazon S3 stream wrapper allows you to perform with objects stored in Amazon S3.
=============== ========================================================================================================
unlink() Delete an object from a bucket.
.. code-block:: php
// Delete an object from a bucket
unlink('s3://bucket/key');
You can pass in any options available to the ``DeleteObject`` operation to modify how the object is
deleted (e.g. specifying a specific object version).
.. code-block:: php
// Delete a specific version of an object from a bucket
unlink('s3://bucket/key', stream_context_create(array(
    's3' => array('VersionId' => '123')
)));
filesize() Get the size of an object.
.. code-block:: php
// Get the Content-Length of an object
$size = filesize('s3://bucket/key');
is_file() Checks if a URL is a file.
.. code-block:: php
if (is_file('s3://bucket/key')) {
echo 'It is a file!';
}
file_exists() Checks if an object exists.
.. code-block:: php
if (file_exists('s3://bucket/key')) {
echo 'It exists!';
}
filetype() Checks if a URL maps to a file or bucket (dir).
file() Load the contents of an object in an array of lines. You can pass in any options available to the
``GetObject`` operation to modify how the file is downloaded.
filemtime() Get the last modified date of an object.
rename() Rename an object by copying the object then deleting the original. You can pass in options available to
the ``CopyObject`` and ``DeleteObject`` operations to the stream context parameters to modify how the
object is copied and deleted.
copy() Copy an object from one location to another. You can pass options available to the ``CopyObject``
operation into the stream context options to modify how the object is copied.
.. code-block:: php
// Copy a file on Amazon S3 to another bucket
copy('s3://bucket/key', 's3://other_bucket/key');
=============== ========================================================================================================
Working with buckets
~~~~~~~~~~~~~~~~~~~~
You can modify and browse Amazon S3 buckets similar to how PHP allows the modification and traversal of directories on
your filesystem.
Here's an example of creating a bucket:
.. code-block:: php
mkdir('s3://bucket');
You can pass in stream context options to the ``mkdir()`` method to modify how the bucket is created using the
parameters available to the
`CreateBucket <http://docs.aws.amazon.com/aws-sdk-php/latest/class-Aws.S3.S3Client.html#_createBucket>`_ operation.
.. code-block:: php
// Create a bucket in the EU region
mkdir('s3://bucket', stream_context_create(array(
    's3' => array(
        'LocationConstraint' => 'eu-west-1'
    )
)));
You can delete buckets using the ``rmdir()`` function.
.. code-block:: php
// Delete a bucket
rmdir('s3://bucket');
.. note::
A bucket can only be deleted if it is empty.
Listing the contents of a bucket
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The `opendir() <http://www.php.net/manual/en/function.opendir.php>`_,
`readdir() <http://www.php.net/manual/en/function.readdir.php>`_,
`rewinddir() <http://www.php.net/manual/en/function.rewinddir.php>`_, and
`closedir() <http://php.net/manual/en/function.closedir.php>`_ PHP functions can be used with the Amazon S3 stream
wrapper to traverse the contents of a bucket. You can pass in parameters available to the
`ListObjects <http://docs.aws.amazon.com/aws-sdk-php/latest/class-Aws.S3.S3Client.html#_listObjects>`_ operation as
custom stream context options to the ``opendir()`` function to modify how objects are listed.
.. code-block:: php
$dir = "s3://bucket/";
if (is_dir($dir) && ($dh = opendir($dir))) {
while (($file = readdir($dh)) !== false) {
echo "filename: {$file} : filetype: " . filetype($dir . $file) . "\n";
}
closedir($dh);
}
You can recursively list each object and prefix in a bucket using PHP's
`RecursiveDirectoryIterator <http://php.net/manual/en/class.recursivedirectoryiterator.php>`_.
.. code-block:: php
$dir = 's3://bucket';
$iterator = new RecursiveIteratorIterator(new RecursiveDirectoryIterator($dir));
foreach ($iterator as $file) {
echo $file->getType() . ': ' . $file . "\n";
}
Another easy way to list the contents of the bucket is using the
`Symfony2 Finder component <http://symfony.com/doc/master/components/finder.html>`_.
.. code-block:: php
<?php
require 'vendor/autoload.php';
use Symfony\Component\Finder\Finder;
$aws = Aws\Common\Aws::factory('/path/to/config.json');
$s3 = $aws->get('s3')->registerStreamWrapper();
$finder = new Finder();
// Get all files and folders (key prefixes) from "bucket" that are less than 100k
// and have been updated in the last year
$finder->in('s3://bucket')
->size('< 100K')
->date('since 1 year ago');
foreach ($finder as $file) {
echo $file->getType() . ": {$file}\n";
}
Syncing data with Amazon S3
---------------------------
Uploading a directory to a bucket
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Uploading a local directory to an Amazon S3 bucket is rather simple:
.. code-block:: php
$client->uploadDirectory('/local/directory', 'my-bucket');
The ``uploadDirectory()`` method of a client will compare the contents of the local directory to the contents in the
Amazon S3 bucket and only transfer files that have changed. While iterating over the keys in the bucket and comparing
against the names of local files using a customizable filename-to-key converter, the changed files are added to an
in-memory queue and uploaded concurrently. When the size of a file exceeds a customizable ``multipart_upload_size``
parameter, the uploader will automatically upload the file using a multipart upload.
Customizing the upload sync
^^^^^^^^^^^^^^^^^^^^^^^^^^^
The method signature of the ``uploadDirectory()`` method allows for the following arguments:
.. code-block:: php
public function uploadDirectory($directory, $bucket, $keyPrefix = null, array $options = array())
By specifying ``$keyPrefix``, you can cause the uploaded objects to be placed under a virtual folder in the Amazon S3
bucket. For example, if the ``$bucket`` name is ``my-bucket`` and the ``$keyPrefix`` is 'testing/', then your files
will be uploaded to ``my-bucket`` under the ``testing/`` virtual folder:
``https://my-bucket.s3.amazonaws.com/testing/filename.txt``
The ``uploadDirectory()`` method also accepts an optional associative array of ``$options`` that can be used to further
control the transfer.
=========== ============================================================================================================
params Array of parameters to use with each ``PutObject`` or ``CreateMultipartUpload`` operation performed during
the transfer. For example, you can specify an ``ACL`` key to change the ACL of each uploaded object.
See `PutObject <http://docs.aws.amazon.com/aws-sdk-php/latest/class-Aws.S3.S3Client.html#_putObject>`_
for a list of available options.
base_dir Base directory to remove from each object key. By default, the ``$directory`` passed into the
``uploadDirectory()`` method will be removed from each object key.
force Set to true to upload every file, even if the file is already in Amazon S3 and has not changed.
concurrency Maximum number of parallel uploads (defaults to 5)
debug Set to true to enable debug mode to print information about each upload. Setting this value to an ``fopen``
resource will write the debug output to a stream rather than to ``STDOUT``.
=========== ============================================================================================================
In the following example, a local directory is uploaded with each object stored in the bucket using a ``public-read``
ACL, 20 requests are sent in parallel, and debug information is printed to standard output as each request is
transferred.
.. code-block:: php
$dir = '/local/directory';
$bucket = 'my-bucket';
$keyPrefix = '';
$client->uploadDirectory($dir, $bucket, $keyPrefix, array(
'params' => array('ACL' => 'public-read'),
'concurrency' => 20,
'debug' => true
));
More control with the UploadSyncBuilder
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The ``uploadDirectory()`` method is an abstraction layer over the much more powerful ``Aws\S3\Sync\UploadSyncBuilder``.
You can use an ``UploadSyncBuilder`` object directly if you need more control over the transfer. Using an
``UploadSyncBuilder`` allows for the following advanced features:
* Can upload only files that match a glob expression
* Can upload only files that match a regular expression
* Can specify a custom ``\Iterator`` object to use to yield files to an ``UploadSync`` object. This can be used, for
example, to filter out which files are transferred even further using something like the
`Symfony 2 Finder component <http://symfony.com/doc/master/components/finder.html>`_.
* Can specify the ``Aws\S3\Sync\FilenameConverterInterface`` objects used to convert Amazon S3 object names to local
filenames and vice versa. This can be useful if you require files to be renamed in a specific way.
.. code-block:: php
use Aws\S3\Sync\UploadSyncBuilder;
UploadSyncBuilder::getInstance()
->setClient($client)
->setBucket('my-bucket')
->setAcl('public-read')
->uploadFromGlob('/path/to/file/*.php')
->build()
->transfer();
Downloading a bucket to a directory
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
You can download the objects stored in an Amazon S3 bucket using features similar to the ``uploadDirectory()`` method
and the ``UploadSyncBuilder``. You can download the entire contents of a bucket using the
``Aws\S3\S3Client::downloadBucket()`` method.
The following example will download all of the objects from ``my-bucket`` and store them in ``/local/directory``.
Object keys that are under virtual subfolders are converted into a nested directory structure when downloading the
objects. Any directories missing on the local filesystem will be created automatically.
.. code-block:: php
$client->downloadBucket('/local/directory', 'my-bucket');
Customizing the download sync
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The method signature of the ``downloadBucket()`` method allows for the following arguments:
.. code-block:: php
public function downloadBucket($directory, $bucket, $keyPrefix = null, array $options = array())
By specifying ``$keyPrefix``, you can limit the downloaded objects to only keys that begin with the specified
``$keyPrefix``. This, for example, can be useful for downloading objects under a specific virtual directory.
The ``downloadBucket()`` method also accepts an optional associative array of ``$options`` that can be used to further
control the transfer.
=============== ============================================================================================================
params Array of parameters to use with each ``GetObject`` operation performed during the transfer. See
`GetObject <http://docs.aws.amazon.com/aws-sdk-php/latest/class-Aws.S3.S3Client.html#_getObject>`_
for a list of available options.
base_dir Base directory to remove from each object key when downloading. By default, the entire object key is
used to determine the path to the file on the local filesystem.
force Set to true to download every file, even if the file is already on the local filesystem and has not
changed.
concurrency Maximum number of parallel downloads (defaults to 10)
debug Set to true to enable debug mode to print information about each download. Setting this value to an
``fopen`` resource will write the debug output to a stream rather than to ``STDOUT``.
allow_resumable Set to true to allow previously interrupted downloads to be resumed using a Range GET
=============== ============================================================================================================
More control with the DownloadSyncBuilder
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The ``downloadBucket()`` method is an abstraction layer over the much more powerful
``Aws\S3\Sync\DownloadSyncBuilder``. You can use a ``DownloadSyncBuilder`` object directly if you need more control
over the transfer. Using the ``DownloadSyncBuilder`` allows for the following advanced features:
* Can download only files that match a regular expression
* Just like the ``UploadSyncBuilder``, you can specify a custom ``\Iterator`` object to use to yield files to a
``DownloadSync`` object.
* Can specify the ``Aws\S3\Sync\FilenameConverterInterface`` objects used to convert Amazon S3 object names to local
filenames and vice versa.
.. code-block:: php
use Aws\S3\Sync\DownloadSyncBuilder;
DownloadSyncBuilder::getInstance()
->setClient($client)
->setDirectory('/path/to/directory')
->setBucket('my-bucket')
->setKeyPrefix('/under-prefix')
->allowResumableDownloads()
->build()
->transfer();
Cleaning up
-----------
Now that we've taken a tour of how you can use the Amazon S3 client, let's clean up any resources we may have created.
.. example:: S3/Integration/S3_20060301_Test.php testCleanUpBucket

View File

@ -0,0 +1,3 @@
.. service:: Ses
.. include:: _snippets/incomplete.txt

View File

@ -0,0 +1,164 @@
.. service:: SimpleDb
Creating domains
----------------
The first step in storing data within Amazon SimpleDB is to
`create one or more domains <http://docs.aws.amazon.com/AmazonSimpleDB/latest/GettingStartedGuide/CreatingADomain.html>`_.
Domains are similar to database tables, except that you cannot perform functions across multiple domains, such as
querying multiple domains or using foreign keys. As a consequence, you should plan an Amazon SimpleDB data
architecture that will meet the needs of your project.
Let's use the CreateDomain operation of the |serviceFullName| client to create a domain.
.. code-block:: php
$client->createDomain(array('DomainName' => 'mydomain'));
List all domains
----------------
Now that the domain is created, we can list the domains in our account to verify that it exists. This is done using the
ListDomains operation and the ListDomains iterator.
.. code-block:: php
$domains = $client->getIterator('ListDomains')->toArray();
var_export($domains);
// Lists an array of domain names, including "mydomain"
Retrieving a domain
-------------------
You can get more information about a domain using the DomainMetadata operation. This operation returns information about
a domain, including when the domain was created, the number of items and attributes, and the size of attribute names
and values.
.. code-block:: php
$result = $client->domainMetadata(array('DomainName' => 'mydomain'));
echo $result['ItemCount'] . "\n";
echo $result['ItemNamesSizeBytes'] . "\n";
echo $result['AttributeNameCount'] . "\n";
echo $result['AttributeNamesSizeBytes'] . "\n";
echo $result['AttributeValueCount'] . "\n";
echo $result['AttributeValuesSizeBytes'] . "\n";
echo $result['Timestamp'] . "\n";
Adding items
------------
After creating a domain, you are ready to start putting data into it. Domains consist of items, which are described by
attribute name-value pairs. Items are added to a domain using the PutAttributes operation.
.. code-block:: php
$client->putAttributes(array(
'DomainName' => 'mydomain',
'ItemName' => 'test',
'Attributes' => array(
array('Name' => 'a', 'Value' => 1, 'Replace' => true),
array('Name' => 'b', 'Value' => 2),
)
));
.. note::
When you put attributes, notice that the Replace parameter is optional and set to false by default. If you do not
explicitly set Replace to true, a new attribute name-value pair is created each time, even if the Name value
already exists in your Amazon SimpleDB domain.
Retrieving items
----------------
GetAttributes
~~~~~~~~~~~~~
We can check to see if the item was added correctly by retrieving the specific item by name using the GetAttributes
operation.
.. code-block:: php
$result = $client->getAttributes(array(
'DomainName' => 'mydomain',
'ItemName' => 'test',
'Attributes' => array(
'a', 'b'
),
'ConsistentRead' => true
));
Notice that we set the ``ConsistentRead`` option to ``true``. Amazon SimpleDB keeps multiple copies of each domain. A
successful write (using PutAttributes, BatchPutAttributes, DeleteAttributes, BatchDeleteAttributes, CreateDomain, or
DeleteDomain) guarantees that all copies of the domain will durably persist. Amazon SimpleDB supports two read
consistency options: eventually consistent read and consistent read. A consistent read (using Select or GetAttributes
with ConsistentRead=true) returns a result that reflects all writes that received a successful response prior to the
read.
You can find out more about consistency and |serviceFullName| in the service's
`developer guide on consistency <http://docs.aws.amazon.com/AmazonSimpleDB/latest/DeveloperGuide/ConsistencySummary.html>`_.
Select
~~~~~~
You can retrieve attributes for items by name, but |serviceFullName| also supports the Select operation. The Select
operation returns a set of Attributes for ItemNames that match the select expression. Select is similar to the standard
SQL SELECT statement.
Let's write a select query that will return all items with the ``a`` attribute set to ``1``.
.. code-block:: php
$result = $client->select(array(
'SelectExpression' => "select * from mydomain where a = '1'"
));
foreach ($result['Items'] as $item) {
echo $item['Name'] . "\n";
var_export($item['Attributes']);
}
Because some responses will be truncated and require subsequent requests, it is recommended that you always use the
Select iterator to easily retrieve an entire result set.
.. code-block:: php
$iterator = $client->getIterator('Select', array(
'SelectExpression' => "select * from mydomain where a = '1'"
));
foreach ($iterator as $item) {
echo $item['Name'] . "\n";
var_export($item['Attributes']);
}
You can find much more information about the Select operation in the service's
`developer guide on select <http://docs.aws.amazon.com/AmazonSimpleDB/latest/DeveloperGuide/UsingSelect.html>`_.
Deleting items
--------------
You can delete specific attributes of an item or an entire item using the DeleteAttributes operation. If all attributes
of an item are deleted, the item is deleted.
Let's go ahead and delete the item we created in ``mydomain``.
.. code-block:: php
$client->deleteAttributes(array(
'DomainName' => 'mydomain',
'ItemName' => 'test'
));
Because we did not specify an ``Attributes`` parameter, the entire item is deleted.
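To delete only specific attributes instead of the whole item, provide an ``Attributes`` parameter naming the
attributes to remove. Here is a sketch using the attribute names from earlier:
.. code-block:: php
// Deletes only attribute "b"; the item and its other attributes remain
$client->deleteAttributes(array(
    'DomainName' => 'mydomain',
    'ItemName'   => 'test',
    'Attributes' => array(
        array('Name' => 'b'),
    )
));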
Deleting domains
----------------
Now that we've explored some of the features of |serviceFullName|, we should delete our testing data. The
DeleteDomain operation deletes a domain. Any items (and their attributes) in the domain are deleted as well. The
DeleteDomain operation might take 10 or more seconds to complete.
.. code-block:: php
$client->deleteDomain(array('DomainName' => 'mydomain'));

View File

@ -0,0 +1,3 @@
.. service:: Sns
.. include:: _snippets/incomplete.txt

View File

@ -0,0 +1,101 @@
.. service:: Sqs
Creating a queue
----------------
Now, let's create a queue. You can create a standard queue by just providing a name. Make sure to get the queue's URL
from the result, since the queue URL is the unique identifier used to specify the queue in order to send and receive
messages.
.. code-block:: php
$result = $client->createQueue(array('QueueName' => 'my-queue'));
$queueUrl = $result->get('QueueUrl');
You can also set attributes on your queue when you create it.
.. code-block:: php
use Aws\Common\Enum\Size;
use Aws\Sqs\Enum\QueueAttribute;
$result = $client->createQueue(array(
'QueueName' => 'my-queue',
'Attributes' => array(
QueueAttribute::DELAY_SECONDS => 5,
QueueAttribute::MAXIMUM_MESSAGE_SIZE => 4 * Size::KB,
),
));
$queueUrl = $result->get('QueueUrl');
You can also set queue attributes after the queue has been created.
.. code-block:: php
use Aws\Common\Enum\Time;
use Aws\Sqs\Enum\QueueAttribute;
$result = $client->setQueueAttributes(array(
'QueueUrl' => $queueUrl,
'Attributes' => array(
QueueAttribute::VISIBILITY_TIMEOUT => 2 * Time::MINUTES,
),
));
Sending messages
----------------
Sending a message to a queue is straightforward with the ``SendMessage`` command.
.. code-block:: php
$client->sendMessage(array(
'QueueUrl' => $queueUrl,
'MessageBody' => 'An awesome message!',
));
You can override the queue's default delay for a message when you send it.
.. code-block:: php
$client->sendMessage(array(
'QueueUrl' => $queueUrl,
'MessageBody' => 'An awesome message!',
'DelaySeconds' => 30,
));
Receiving messages
------------------
Receiving messages is done with the ``ReceiveMessage`` command.
.. code-block:: php
$result = $client->receiveMessage(array(
'QueueUrl' => $queueUrl,
));
foreach ($result->getPath('Messages/*/Body') as $messageBody) {
// Do something with the message
echo $messageBody;
}
By default, only one message will be returned. If you want to get more messages, make sure to use the
``MaxNumberOfMessages`` parameter and specify a number of messages (1 to 10). Remember that you are not guaranteed to
receive that many messages, but you can receive up to that amount depending on how many are actually in the queue at
the time of your request.
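For example, the following sketch asks for up to ten messages in a single request:
.. code-block:: php
$result = $client->receiveMessage(array(
    'QueueUrl'            => $queueUrl,
    'MaxNumberOfMessages' => 10,
));
foreach ($result->getPath('Messages/*/Body') as $messageBody) {
    // Do something with each message
    echo $messageBody;
}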
SQS also supports `"long polling"
<http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-long-polling.html>`_, meaning that you
can instruct SQS to hold the connection open with the SDK for up to 20 seconds in order to wait for a message to arrive
in the queue. To configure this behavior, you must use the ``WaitTimeSeconds`` parameter.
.. code-block:: php
$result = $client->receiveMessage(array(
'QueueUrl' => $queueUrl,
'WaitTimeSeconds' => 10,
));
.. note:: You can also configure long-polling at the queue level by setting the ``ReceiveMessageWaitTimeSeconds`` queue
attribute.
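A minimal sketch of enabling queue-level long polling (the 20-second wait time is illustrative):
.. code-block:: php
$client->setQueueAttributes(array(
    'QueueUrl'   => $queueUrl,
    'Attributes' => array(
        'ReceiveMessageWaitTimeSeconds' => 20,
    ),
));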

View File

@ -0,0 +1,3 @@
.. service:: StorageGateway
.. include:: _snippets/incomplete.txt

View File

@ -0,0 +1,3 @@
.. service:: Sts
.. include:: _snippets/incomplete.txt

View File

@ -0,0 +1,3 @@
.. service:: Support
.. include:: _snippets/incomplete.txt

View File

@ -0,0 +1,3 @@
.. service:: Swf
.. include:: _snippets/incomplete.txt

View File

@ -0,0 +1,277 @@
==================
Side-by-side Guide
==================
This guide helps you install, configure, and run Version 1 and Version 2 of the AWS SDK for PHP side-by-side within the
same application or project. Please see the :doc:`migration-guide` for more information on migrating code from the
original AWS SDK for PHP to Version 2.
Since Version 2 of the AWS SDK for PHP now supports all of the AWS services supported by Version 1 (and more), it is
recommended that you begin migrating your code to use Version 2 of the SDK. Using both SDKs side-by-side may be
helpful if your use case requires you to migrate only sections of your code at a time.
Installing and Including the SDKs
---------------------------------
To install and include the SDKs in your project, you must first choose whether or not to use Composer.
Using Composer
~~~~~~~~~~~~~~
Using `Composer <http://getcomposer.org>`_ is the recommended way to install both versions of the AWS SDK for PHP.
Composer is a dependency management tool for PHP that allows you to declare the dependencies your project requires and
installs them into your project. In order to simultaneously use both versions of the SDK in the same project through
Composer, you must do the following:
#. Add both of the SDKs as dependencies in your project's ``composer.json`` file.
.. code-block:: js
{
"require": {
"aws/aws-sdk-php": "*",
"amazonwebservices/aws-sdk-for-php": "*"
}
}
**Note:** Consider tightening your dependencies to a known version when deploying mission-critical applications
(e.g., ``2.0.*``).
#. Download and install Composer.
.. code-block:: sh
curl -s "http://getcomposer.org/installer" | php
#. Install your dependencies.
.. code-block:: sh
php composer.phar install
#. Require Composer's autoloader.
Composer also prepares an autoload file that's capable of autoloading all of the classes in any of the libraries that
it downloads. To use it, just add the following line to your code's bootstrap process.
.. code-block:: php
require '/path/to/sdk/vendor/autoload.php';
You can find out more on how to install Composer, configure autoloading, and other best-practices for defining
dependencies at `getcomposer.org <http://getcomposer.org>`_.
Without Composer
~~~~~~~~~~~~~~~~
Without Composer, you must manage your own project dependencies.
#. Download both of the SDKs (via PEAR, GitHub, or the AWS website) into a location accessible by your project. Make
certain to use the pre-packaged ``aws.phar`` file, which includes all of the dependencies for the AWS SDK for PHP.
#. In your code's bootstrap process, you need to explicitly require the bootstrap file from Version 1 of the SDK and the
``aws.phar`` file containing Version 2 of the SDK:
.. code-block:: php
// Include each of the SDK's bootstrap files to setup autoloading
require '/path/to/sdk.class.php'; // Load the Version 1 bootstrap file
require '/path/to/aws.phar'; // Load the Version 2 pre-packaged phar file
Configuring and Instantiating the SDKs
--------------------------------------
How you configure and instantiate the SDKs is determined by whether or not you are using the service builder
(``Aws\Common\Aws`` class).
Instantiating Clients via the Service Builder
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The service builder (``Aws\Common\Aws`` class) in the AWS SDK for PHP enables configuring all service clients with the
same credentials. It also accepts additional settings for some or all of the clients. The service builder functionality
is inherited from the `Guzzle <http://guzzlephp.org>`_ project.
You can pass the service builder a configuration file containing your credentials and other settings. It will then
inject these into all of the service clients your code instantiates. For more information about the configuration file,
please read the :doc:`configuration` section of the guide. When using both SDKs side-by-side, your configuration file
must include the following line:
.. code-block:: php
'includes' => array('_sdk1'),
This will automatically set up the service clients from Version 1 of the SDK, making them accessible through the
service builder by keys such as ``v1.s3`` and ``v1.cloudformation``. Here is an example configuration file that
includes a reference to Version 1 of the SDK:
.. code-block:: php
<?php return array(
'includes' => array('_sdk1'),
'services' => array(
'default_settings' => array(
'params' => array(
'key' => 'your-aws-access-key-id',
'secret' => 'your-aws-secret-access-key',
'region' => 'us-west-2'
)
)
)
);
Your code must instantiate the service builder through its factory method by passing in the path of the configuration
file. Your code then retrieves instances of the specific service clients from the returned builder object.
.. code-block:: php
use Aws\Common\Aws;
// Instantiate the service builder
$aws = Aws::factory('/path/to/your/config.php');
// Instantiate S3 clients via the service builder
$s3v1 = $aws->get('v1.s3'); // All Version 1 clients are prefixed with "v1."
$s3v2 = $aws->get('s3');
Instantiating Clients via Client Factories
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Your code can instantiate service clients using their respective ``factory()`` methods by passing in an array of
configuration data, including your credentials. The ``factory()`` method works for clients in either version of the SDK.
.. code-block:: php
use Aws\S3\S3Client;
// Create an array of configuration options
$config = array(
'key' => 'your-aws-access-key-id',
'secret' => 'your-aws-secret-access-key',
);
// Instantiate Amazon S3 clients from both SDKs via their factory methods
$s3v1 = AmazonS3::factory($config);
$s3v2 = S3Client::factory($config);
Optionally, you could alias the classes to make it clearer which version of the SDK they are from.
.. code-block:: php
use AmazonS3 as S3ClientV1;
use Aws\S3\S3Client as S3ClientV2;
$config = array(
'key' => 'your-aws-access-key-id',
'secret' => 'your-aws-secret-access-key',
);
$s3v1 = S3ClientV1::factory($config);
$s3v2 = S3ClientV2::factory($config);
Complete Examples
-----------------
The following two examples fully demonstrate including, configuring, instantiating, and using both SDKs side-by-side.
These examples adopt the recommended practices of using Composer and the service builder.
Example 1 - Dual Amazon S3 Clients
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This example demonstrates using an Amazon S3 client from the AWS SDK for PHP working side-by-side with an Amazon S3
client from the first PHP SDK.
.. code-block:: php
<?php
require 'vendor/autoload.php';
$aws = Aws\Common\Aws::factory('/path/to/config.json');
$s3v1 = $aws->get('v1.s3');
$s3v2 = $aws->get('s3');
echo "ListBuckets with SDK Version 1:\n";
echo "-------------------------------\n";
$response = $s3v1->listBuckets();
if ($response->isOK()) {
foreach ($response->body->Buckets->Bucket as $bucket) {
echo "- {$bucket->Name}\n";
}
} else {
echo "Request failed.\n";
}
echo "\n";
echo "ListBuckets with SDK Version 2:\n";
echo "-------------------------------\n";
try {
$result = $s3v2->listBuckets();
foreach ($result['Buckets'] as $bucket) {
echo "- {$bucket['Name']}\n";
}
} catch (Aws\S3\Exception\S3Exception $e) {
echo "Request failed.\n";
}
echo "\n";
Example 2 - Amazon DynamoDB and Amazon SNS Clients
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This example shows how the AWS SDK for PHP DynamoDB client works together with the SNS client from the original SDK.
For this example, an ice cream parlor publishes a daily message (via SNS) containing its "flavors of the day" to
subscribers. First, it retrieves the flavors of the day from its DynamoDB database using the AWS SDK for PHP DynamoDB
client. It then uses the SNS client from the first SDK to publish a message to its SNS topic.
.. code-block:: php
<?php
require 'vendor/autoload.php';
$aws = Aws\Common\Aws::factory('/path/to/config.php');
// Instantiate the clients
$ddb = $aws->get('dynamodb');
$sns = $aws->get('v1.sns');
$sns->set_region(AmazonSNS::REGION_US_W2);
// Get today's flavors from DynamoDB using Version 2 of the SDK
$date = new DateTime();
$flavors = $ddb->getItem(array(
'TableName' => 'flavors-of-the-day',
'Key' => array(
'HashKeyElement' => array('N' => $date->format('n')),
'RangeKeyElement' => array('N' => $date->format('j'))
)
))->getPath('Item/flavors/SS');
// Generate the message
$today = $date->format('l, F jS');
$message = "It's {$today}, and here are our flavors of the day:\n";
foreach ($flavors as $flavor) {
$message .= "- {$flavor}\n";
}
$message .= "\nCome visit Mr. Foo\'s Ice Cream Parlor on 5th and Pine!\n";
echo "{$message}\n";
// Send today's flavors to subscribers using Version 1 of the SDK
$response = $sns->publish('flavors-of-the-day-sns-topic', $message, array(
'Subject' => 'Flavors of the Day - Mr. Foo\'s Ice Cream Parlor'
));
if ($response->isOK()) {
echo "Sent the flavors of the day to your subscribers.\n";
} else {
echo "There was an error sending the flavors of the day to your subscribers.\n";
}
Final Notes
-----------
Remember that **instantiating clients from the original SDK using the service builder from AWS SDK for PHP does not
change how those clients work**. For example, notice the differences in response handling between SDK versions. For a
full list of differences between the versions, please see the :doc:`migration-guide`.
For more information about using the original version of the SDK, please see the `Version 1 API Documentation
<http://docs.aws.amazon.com/AWSSDKforPHP/latest/>`_ and the `Version 1 SDK README
<https://github.com/amazonwebservices/aws-sdk-for-php/blob/master/README.md>`_.

View File

@ -0,0 +1,44 @@
<?xml version="1.0" encoding="UTF-8"?>
<phpunit bootstrap="./tests/bootstrap.php"
colors="true"
processIsolation="false"
stopOnFailure="false"
syntaxCheck="false"
convertErrorsToExceptions="true"
convertNoticesToExceptions="true"
convertWarningsToExceptions="true"
testSuiteLoaderClass="PHPUnit_Runner_StandardTestSuiteLoader">
<php>
<!-- If you want to run the integration tests, you will need to provide
the path to a service configuration file. You WILL be charged
for your usage if you choose to run the integration tests. -->
<server name="CONFIG" value="test_services.json" />
<!-- The PREFIX is added to any globally shared namespaced
resources that are created during integration tests -->
<server name="PREFIX" value="hostname" />
<!-- Set to one to enable wire logging in integration tests that are
configured to do wire logging -->
<server name="WIRE_LOGGING" value="0" />
<!-- An SES-verified email address for SES integration tests -->
<server name="VERIFIED_EMAIL" value="example@example.com" />
</php>
<testsuites>
<testsuite name="Aws">
<directory>./tests/Aws/Tests</directory>
</testsuite>
</testsuites>
<!-- Only execute integration and performance tests -->
<groups>
<include>
<group>integration</group>
<group>performance</group>
</include>
</groups>
</phpunit>

79
vendor/aws/aws-sdk-php/phpunit.xml.dist vendored Normal file
View File

@ -0,0 +1,79 @@
<?xml version="1.0" encoding="UTF-8"?>
<phpunit bootstrap="./tests/bootstrap.php"
colors="true"
processIsolation="false"
stopOnFailure="false"
syntaxCheck="false"
convertErrorsToExceptions="true"
convertNoticesToExceptions="true"
convertWarningsToExceptions="true"
testSuiteLoaderClass="PHPUnit_Runner_StandardTestSuiteLoader">
<testsuites>
<testsuite name="Aws">
<directory>tests/Aws/Tests</directory>
</testsuite>
</testsuites>
<!-- Exclude the integration tests in regular unit tests -->
<groups>
<exclude>
<group>integration</group>
<group>performance</group>
</exclude>
</groups>
<logging>
<log type="junit" target="build/artifacts/logs/junit.xml" logIncompleteSkipped="false" />
</logging>
<filter>
<whitelist>
<directory suffix=".php">./src/Aws</directory>
<exclude>
<directory suffix="Interface.php">./src/Aws</directory>
<directory>./src/Aws/**/Enum</directory>
<directory>./src/Aws/**/Resources</directory>
<directory>./src/Aws/Common/Exception/DomainException.php</directory>
<directory>./src/Aws/Common/Exception/InvalidArgumentException.php</directory>
<directory>./src/Aws/Common/Exception/LogicException.php</directory>
<directory>./src/Aws/Common/Exception/OverflowException.php</directory>
<directory>./src/Aws/Common/Exception/OutOfBoundsException.php</directory>
<directory>./src/Aws/Common/Exception/RuntimeException.php</directory>
<directory>./src/Aws/Common/Exception/UnexpectedValueException.php</directory>
<directory suffix="Exception.php">./src/Aws/AutoScaling/Exception</directory>
<directory suffix="Exception.php">./src/Aws/CloudFormation/Exception</directory>
<directory suffix="Exception.php">./src/Aws/CloudFront/Exception</directory>
<directory suffix="Exception.php">./src/Aws/CloudSearch/Exception</directory>
<directory suffix="Exception.php">./src/Aws/CloudTrail/Exception</directory>
<directory suffix="Exception.php">./src/Aws/CloudWatch/Exception</directory>
<directory suffix="Exception.php">./src/Aws/DataPipeline/Exception</directory>
<directory suffix="Exception.php">./src/Aws/DirectConnect/Exception</directory>
<directory suffix="Exception.php">./src/Aws/DynamoDb/Exception</directory>
<directory suffix="Exception.php">./src/Aws/Ec2/Exception</directory>
<directory suffix="Exception.php">./src/Aws/ElastiCache/Exception</directory>
<directory suffix="Exception.php">./src/Aws/ElasticBeanstalk/Exception</directory>
<directory suffix="Exception.php">./src/Aws/ElasticLoadBalancing/Exception</directory>
<directory suffix="Exception.php">./src/Aws/ElasticTranscoder/Exception</directory>
<directory suffix="Exception.php">./src/Aws/Emr/Exception</directory>
<directory suffix="Exception.php">./src/Aws/Glacier/Exception</directory>
<directory suffix="Exception.php">./src/Aws/Iam/Exception</directory>
<directory suffix="Exception.php">./src/Aws/ImportExport/Exception</directory>
<directory suffix="Exception.php">./src/Aws/OpsWorks/Exception</directory>
<directory suffix="Exception.php">./src/Aws/Rds/Exception</directory>
<directory suffix="Exception.php">./src/Aws/Redshift/Exception</directory>
<directory suffix="Exception.php">./src/Aws/Route53/Exception</directory>
<directory suffix="Exception.php">./src/Aws/S3/Exception</directory>
<directory suffix="Exception.php">./src/Aws/Ses/Exception</directory>
<directory suffix="Exception.php">./src/Aws/SimpleDb/Exception</directory>
<directory suffix="Exception.php">./src/Aws/Sns/Exception</directory>
<directory suffix="Exception.php">./src/Aws/Sqs/Exception</directory>
<directory suffix="Exception.php">./src/Aws/StorageGateway/Exception</directory>
<directory suffix="Exception.php">./src/Aws/Sts/Exception</directory>
<directory suffix="Exception.php">./src/Aws/Support/Exception</directory>
<directory suffix="Exception.php">./src/Aws/Swf/Exception</directory>
</exclude>
</whitelist>
</filter>
</phpunit>

View File

@ -0,0 +1,97 @@
<?php
/**
* Copyright 2010-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
namespace Aws\AutoScaling;
use Aws\Common\Client\AbstractClient;
use Aws\Common\Client\ClientBuilder;
use Aws\Common\Enum\ClientOptions as Options;
use Guzzle\Common\Collection;
use Guzzle\Service\Resource\Model;
use Guzzle\Service\Resource\ResourceIteratorInterface;
/**
* Client to interact with Auto Scaling
*
* @method Model createAutoScalingGroup(array $args = array()) {@command AutoScaling CreateAutoScalingGroup}
* @method Model createLaunchConfiguration(array $args = array()) {@command AutoScaling CreateLaunchConfiguration}
* @method Model createOrUpdateTags(array $args = array()) {@command AutoScaling CreateOrUpdateTags}
* @method Model deleteAutoScalingGroup(array $args = array()) {@command AutoScaling DeleteAutoScalingGroup}
* @method Model deleteLaunchConfiguration(array $args = array()) {@command AutoScaling DeleteLaunchConfiguration}
* @method Model deleteNotificationConfiguration(array $args = array()) {@command AutoScaling DeleteNotificationConfiguration}
* @method Model deletePolicy(array $args = array()) {@command AutoScaling DeletePolicy}
* @method Model deleteScheduledAction(array $args = array()) {@command AutoScaling DeleteScheduledAction}
* @method Model deleteTags(array $args = array()) {@command AutoScaling DeleteTags}
* @method Model describeAdjustmentTypes(array $args = array()) {@command AutoScaling DescribeAdjustmentTypes}
* @method Model describeAutoScalingGroups(array $args = array()) {@command AutoScaling DescribeAutoScalingGroups}
* @method Model describeAutoScalingInstances(array $args = array()) {@command AutoScaling DescribeAutoScalingInstances}
* @method Model describeAutoScalingNotificationTypes(array $args = array()) {@command AutoScaling DescribeAutoScalingNotificationTypes}
* @method Model describeLaunchConfigurations(array $args = array()) {@command AutoScaling DescribeLaunchConfigurations}
* @method Model describeMetricCollectionTypes(array $args = array()) {@command AutoScaling DescribeMetricCollectionTypes}
* @method Model describeNotificationConfigurations(array $args = array()) {@command AutoScaling DescribeNotificationConfigurations}
* @method Model describePolicies(array $args = array()) {@command AutoScaling DescribePolicies}
* @method Model describeScalingActivities(array $args = array()) {@command AutoScaling DescribeScalingActivities}
* @method Model describeScalingProcessTypes(array $args = array()) {@command AutoScaling DescribeScalingProcessTypes}
* @method Model describeScheduledActions(array $args = array()) {@command AutoScaling DescribeScheduledActions}
* @method Model describeTags(array $args = array()) {@command AutoScaling DescribeTags}
* @method Model describeTerminationPolicyTypes(array $args = array()) {@command AutoScaling DescribeTerminationPolicyTypes}
* @method Model disableMetricsCollection(array $args = array()) {@command AutoScaling DisableMetricsCollection}
* @method Model enableMetricsCollection(array $args = array()) {@command AutoScaling EnableMetricsCollection}
* @method Model executePolicy(array $args = array()) {@command AutoScaling ExecutePolicy}
* @method Model putNotificationConfiguration(array $args = array()) {@command AutoScaling PutNotificationConfiguration}
* @method Model putScalingPolicy(array $args = array()) {@command AutoScaling PutScalingPolicy}
* @method Model putScheduledUpdateGroupAction(array $args = array()) {@command AutoScaling PutScheduledUpdateGroupAction}
* @method Model resumeProcesses(array $args = array()) {@command AutoScaling ResumeProcesses}
* @method Model setDesiredCapacity(array $args = array()) {@command AutoScaling SetDesiredCapacity}
* @method Model setInstanceHealth(array $args = array()) {@command AutoScaling SetInstanceHealth}
* @method Model suspendProcesses(array $args = array()) {@command AutoScaling SuspendProcesses}
* @method Model terminateInstanceInAutoScalingGroup(array $args = array()) {@command AutoScaling TerminateInstanceInAutoScalingGroup}
* @method Model updateAutoScalingGroup(array $args = array()) {@command AutoScaling UpdateAutoScalingGroup}
* @method ResourceIteratorInterface getDescribeAutoScalingGroupsIterator(array $args = array()) The input array uses the parameters of the DescribeAutoScalingGroups operation
* @method ResourceIteratorInterface getDescribeAutoScalingInstancesIterator(array $args = array()) The input array uses the parameters of the DescribeAutoScalingInstances operation
* @method ResourceIteratorInterface getDescribeLaunchConfigurationsIterator(array $args = array()) The input array uses the parameters of the DescribeLaunchConfigurations operation
* @method ResourceIteratorInterface getDescribeNotificationConfigurationsIterator(array $args = array()) The input array uses the parameters of the DescribeNotificationConfigurations operation
* @method ResourceIteratorInterface getDescribePoliciesIterator(array $args = array()) The input array uses the parameters of the DescribePolicies operation
* @method ResourceIteratorInterface getDescribeScalingActivitiesIterator(array $args = array()) The input array uses the parameters of the DescribeScalingActivities operation
* @method ResourceIteratorInterface getDescribeScheduledActionsIterator(array $args = array()) The input array uses the parameters of the DescribeScheduledActions operation
* @method ResourceIteratorInterface getDescribeTagsIterator(array $args = array()) The input array uses the parameters of the DescribeTags operation
*
* @link http://docs.aws.amazon.com/aws-sdk-php/guide/latest/service-autoscaling.html User guide
* @link http://docs.aws.amazon.com/aws-sdk-php/latest/class-Aws.AutoScaling.AutoScalingClient.html API docs
*/
class AutoScalingClient extends AbstractClient
{
const LATEST_API_VERSION = '2011-01-01';
/**
* Factory method to create a new Auto Scaling client using an array of configuration options.
*
* @param array|Collection $config Client configuration data
*
* @return self
* @see \Aws\Common\Client\DefaultClient for a list of available configuration options
*/
public static function factory($config = array())
{
return ClientBuilder::factory(__NAMESPACE__)
->setConfig($config)
->setConfigDefaults(array(
Options::VERSION => self::LATEST_API_VERSION,
Options::SERVICE_DESCRIPTION => __DIR__ . '/Resources/autoscaling-%s.php'
))
->build();
}
}

View File

@ -0,0 +1,31 @@
<?php
/**
* Copyright 2010-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
namespace Aws\AutoScaling\Enum;
use Aws\Common\Enum;
/**
* Contains enumerable LifecycleState values
*/
class LifecycleState extends Enum
{
const PENDING = 'Pending';
const QUARANTINED = 'Quarantined';
const IN_SERVICE = 'InService';
const TERMINATING = 'Terminating';
const TERMINATED = 'Terminated';
}

View File

@ -0,0 +1,34 @@
<?php
/**
* Copyright 2010-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
namespace Aws\AutoScaling\Enum;
use Aws\Common\Enum;
/**
* Contains enumerable ScalingActivityStatusCode values
*/
class ScalingActivityStatusCode extends Enum
{
const WAITING_FOR_SPOT_INSTANCE_REQUEST_ID = 'WaitingForSpotInstanceRequestId';
const WAITING_FOR_SPOT_INSTANCE_ID = 'WaitingForSpotInstanceId';
const WAITING_FOR_INSTANCE_ID = 'WaitingForInstanceId';
const PRE_IN_SERVICE = 'PreInService';
const IN_PROGRESS = 'InProgress';
const SUCCESSFUL = 'Successful';
const FAILED = 'Failed';
const CANCELLED = 'Cancelled';
}

View File

@ -0,0 +1,22 @@
<?php
/**
* Copyright 2010-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
namespace Aws\AutoScaling\Exception;
/**
* The named Auto Scaling group or launch configuration already exists.
*/
class AlreadyExistsException extends AutoScalingException {}

View File

@ -0,0 +1,24 @@
<?php
/**
* Copyright 2010-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
namespace Aws\AutoScaling\Exception;
use Aws\Common\Exception\ServiceResponseException;
/**
* Default service exception class
*/
class AutoScalingException extends ServiceResponseException {}

View File

@ -0,0 +1,22 @@
<?php
/**
* Copyright 2010-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
namespace Aws\AutoScaling\Exception;
/**
* The NextToken value is invalid.
*/
class InvalidNextTokenException extends AutoScalingException {}

View File

@ -0,0 +1,22 @@
<?php
/**
* Copyright 2010-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
namespace Aws\AutoScaling\Exception;
/**
* The quota for capacity groups or launch configurations for this customer has already been reached.
*/
class LimitExceededException extends AutoScalingException {}

View File

@ -0,0 +1,22 @@
<?php
/**
* Copyright 2010-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
namespace Aws\AutoScaling\Exception;
/**
* This is returned when you cannot delete a launch configuration or Auto Scaling group because it is being used.
*/
class ResourceInUseException extends AutoScalingException {}

View File

@ -0,0 +1,22 @@
<?php
/**
* Copyright 2010-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
namespace Aws\AutoScaling\Exception;
/**
* You cannot delete an Auto Scaling group while there are scaling activities in progress for that group.
*/
class ScalingActivityInProgressException extends AutoScalingException {}

File diff suppressed because it is too large

View File

@ -0,0 +1,74 @@
<?php
/**
* Copyright 2010-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
namespace Aws\CloudFormation;
use Aws\Common\Client\AbstractClient;
use Aws\Common\Client\ClientBuilder;
use Aws\Common\Enum\ClientOptions as Options;
use Guzzle\Common\Collection;
use Guzzle\Service\Resource\Model;
use Guzzle\Service\Resource\ResourceIteratorInterface;
/**
* Client to interact with AWS CloudFormation
*
* @method Model cancelUpdateStack(array $args = array()) {@command CloudFormation CancelUpdateStack}
* @method Model createStack(array $args = array()) {@command CloudFormation CreateStack}
* @method Model deleteStack(array $args = array()) {@command CloudFormation DeleteStack}
* @method Model describeStackEvents(array $args = array()) {@command CloudFormation DescribeStackEvents}
* @method Model describeStackResource(array $args = array()) {@command CloudFormation DescribeStackResource}
* @method Model describeStackResources(array $args = array()) {@command CloudFormation DescribeStackResources}
* @method Model describeStacks(array $args = array()) {@command CloudFormation DescribeStacks}
* @method Model estimateTemplateCost(array $args = array()) {@command CloudFormation EstimateTemplateCost}
* @method Model getStackPolicy(array $args = array()) {@command CloudFormation GetStackPolicy}
* @method Model getTemplate(array $args = array()) {@command CloudFormation GetTemplate}
* @method Model listStackResources(array $args = array()) {@command CloudFormation ListStackResources}
* @method Model listStacks(array $args = array()) {@command CloudFormation ListStacks}
* @method Model setStackPolicy(array $args = array()) {@command CloudFormation SetStackPolicy}
* @method Model updateStack(array $args = array()) {@command CloudFormation UpdateStack}
* @method Model validateTemplate(array $args = array()) {@command CloudFormation ValidateTemplate}
* @method ResourceIteratorInterface getDescribeStackEventsIterator(array $args = array()) The input array uses the parameters of the DescribeStackEvents operation
* @method ResourceIteratorInterface getDescribeStacksIterator(array $args = array()) The input array uses the parameters of the DescribeStacks operation
* @method ResourceIteratorInterface getListStackResourcesIterator(array $args = array()) The input array uses the parameters of the ListStackResources operation
* @method ResourceIteratorInterface getListStacksIterator(array $args = array()) The input array uses the parameters of the ListStacks operation
*
* @link http://docs.aws.amazon.com/aws-sdk-php/guide/latest/service-cloudformation.html User guide
* @link http://docs.aws.amazon.com/aws-sdk-php/latest/class-Aws.CloudFormation.CloudFormationClient.html API docs
*/
class CloudFormationClient extends AbstractClient
{
const LATEST_API_VERSION = '2010-05-15';
/**
* Factory method to create a new AWS CloudFormation client using an array of configuration options.
*
* @param array|Collection $config Client configuration data
*
* @return self
* @see \Aws\Common\Client\DefaultClient for a list of available configuration options
*/
public static function factory($config = array())
{
return ClientBuilder::factory(__NAMESPACE__)
->setConfig($config)
->setConfigDefaults(array(
Options::VERSION => self::LATEST_API_VERSION,
Options::SERVICE_DESCRIPTION => __DIR__ . '/Resources/cloudformation-%s.php'
))
->build();
}
}

View File

@ -0,0 +1,27 @@
<?php
/**
* Copyright 2010-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
namespace Aws\CloudFormation\Enum;
use Aws\Common\Enum;
/**
* Contains enumerable Capability values
*/
class Capability extends Enum
{
const CAPABILITY_IAM = 'CAPABILITY_IAM';
}

View File

@ -0,0 +1,29 @@
<?php
/**
* Copyright 2010-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
namespace Aws\CloudFormation\Enum;
use Aws\Common\Enum;
/**
* Contains enumerable OnFailure values
*/
class OnFailure extends Enum
{
const DO_NOTHING = 'DO_NOTHING';
const ROLLBACK = 'ROLLBACK';
const DELETE = 'DELETE';
}

View File

@ -0,0 +1,35 @@
<?php
/**
* Copyright 2010-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
namespace Aws\CloudFormation\Enum;
use Aws\Common\Enum;
/**
* Contains enumerable ResourceStatus values
*/
class ResourceStatus extends Enum
{
const CREATE_IN_PROGRESS = 'CREATE_IN_PROGRESS';
const CREATE_FAILED = 'CREATE_FAILED';
const CREATE_COMPLETE = 'CREATE_COMPLETE';
const DELETE_IN_PROGRESS = 'DELETE_IN_PROGRESS';
const DELETE_FAILED = 'DELETE_FAILED';
const DELETE_COMPLETE = 'DELETE_COMPLETE';
const UPDATE_IN_PROGRESS = 'UPDATE_IN_PROGRESS';
const UPDATE_FAILED = 'UPDATE_FAILED';
const UPDATE_COMPLETE = 'UPDATE_COMPLETE';
}

View File

@ -0,0 +1,42 @@
<?php
/**
* Copyright 2010-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
namespace Aws\CloudFormation\Enum;
use Aws\Common\Enum;
/**
* Contains enumerable StackStatus values
*/
class StackStatus extends Enum
{
const CREATE_IN_PROGRESS = 'CREATE_IN_PROGRESS';
const CREATE_FAILED = 'CREATE_FAILED';
const CREATE_COMPLETE = 'CREATE_COMPLETE';
const ROLLBACK_IN_PROGRESS = 'ROLLBACK_IN_PROGRESS';
const ROLLBACK_FAILED = 'ROLLBACK_FAILED';
const ROLLBACK_COMPLETE = 'ROLLBACK_COMPLETE';
const DELETE_IN_PROGRESS = 'DELETE_IN_PROGRESS';
const DELETE_FAILED = 'DELETE_FAILED';
const DELETE_COMPLETE = 'DELETE_COMPLETE';
const UPDATE_IN_PROGRESS = 'UPDATE_IN_PROGRESS';
const UPDATE_COMPLETE_CLEANUP_IN_PROGRESS = 'UPDATE_COMPLETE_CLEANUP_IN_PROGRESS';
const UPDATE_COMPLETE = 'UPDATE_COMPLETE';
const UPDATE_ROLLBACK_IN_PROGRESS = 'UPDATE_ROLLBACK_IN_PROGRESS';
const UPDATE_ROLLBACK_FAILED = 'UPDATE_ROLLBACK_FAILED';
const UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS = 'UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS';
const UPDATE_ROLLBACK_COMPLETE = 'UPDATE_ROLLBACK_COMPLETE';
}

View File

@ -0,0 +1,22 @@
<?php
/**
* Copyright 2010-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
namespace Aws\CloudFormation\Exception;
/**
* Exception that occurs when an AlreadyExistsException error is encountered
*/
class AlreadyExistsException extends CloudFormationException {}

View File

@ -0,0 +1,24 @@
<?php
/**
* Copyright 2010-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
namespace Aws\CloudFormation\Exception;
use Aws\Common\Exception\ServiceResponseException;
/**
* Default service exception class
*/
class CloudFormationException extends ServiceResponseException {}

View File

@ -0,0 +1,22 @@
<?php
/**
* Copyright 2010-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
namespace Aws\CloudFormation\Exception;
/**
* Exception that occurs when an InsufficientCapabilitiesException error is encountered
*/
class InsufficientCapabilitiesException extends CloudFormationException {}

View File

@ -0,0 +1,22 @@
<?php
/**
* Copyright 2010-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
namespace Aws\CloudFormation\Exception;
/**
* Exception that occurs when a LimitExceededException error is encountered
*/
class LimitExceededException extends CloudFormationException {}

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff