Mirror of https://github.com/PrivateBin/PrivateBin.git (synced 2024-10-01 01:26:10 -04:00)

Commit 5768b1e4bd: Merge remote-tracking branch 'origin/master' into php8
@ -5,6 +5,7 @@
* CHANGED: Avoid `SUPER` privilege for setting the `sql_mode` for MariaDB/MySQL (#919)
* FIXED: Revert to CREATE INDEX without IF NOT EXISTS clauses, to support MySQL (#943)
* FIXED: Apply table prefix to indexes as well, to support multiple instances sharing a single database (#943)
* ADDED: S3 Storage backend (#994)
* **1.4 (2022-04-09)**
* ADDED: Translations for Corsican, Estonian, Finnish and Lojban
* ADDED: new HTTP headers improving security (#765)
@ -29,6 +29,7 @@
* rodehoed - option to exempt ips from the rate-limiter
* Mark van Holsteijn - Google Cloud Storage backend
* Austin Huang - Oracle database support
* Felix J. Ogris - S3 Storage backend

## Translations
* Hexalyse - French
INSTALL.md (39 changed lines)
@ -232,3 +232,42 @@ Platform using Google Cloud Run is easy and cheap.
To use the Google Cloud Storage backend you have to install the suggested library using the command `composer require google/cloud-storage`.

#### Using S3 Storage

Similar to Google Cloud Storage, you can choose S3 as storage backend. It uses the AWS SDK for PHP, but can also talk to a Rados gateway as part of a CEPH cluster. To use this backend, you first have to install the SDK in the document root of PrivateBin: `composer require aws/aws-sdk-php`. You have to create the S3 bucket on the CEPH cluster before using the S3 backend.

In the `[model]` section of cfg/conf.php, set `class` to `S3Storage`.
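That section then contains exactly the two lines also shown in the sample configuration and in the header comment of `lib/Data/S3Storage.php` further down:

```
[model]
class = S3Storage
```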
You can set any combination of the following options in the `[model_options]` section:

* region
* version
* endpoint
* bucket
* prefix
* accesskey
* secretkey
* use_path_style_endpoint
By default, prefix is empty. If set, the S3 backend will place all PrivateBin data beneath this prefix.
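As an illustration of the resulting key layout (derived from `_getKey()` and `createComment()` in `lib/Data/S3Storage.php` below; the paste id is made up):

```
prefix = "privatebin"
; a paste with id "abc123" is stored under the key
;   privatebin/abc123
; and its comments under
;   privatebin/abc123/discussion/<parentid>/<commentid>
```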
For AWS, you have to provide at least `region`, `bucket`, `accesskey`, and `secretkey`.
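A minimal AWS configuration could therefore look like this (the values mirror the AWS example added to the sample configuration further down; substitute your own bucket name and credentials):

```
region = "eu-central-1"
version = "latest"
bucket = "my-bucket"
accesskey = "access key id"
secretkey = "secret access key"
```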
For CEPH, follow this example:

```
region = ""
version = "2006-03-01"
endpoint = "https://s3.my-ceph.invalid"
use_path_style_endpoint = true
bucket = "my-bucket"
accesskey = "my-rados-user"
secretkey = "my-rados-pass"
```
@ -205,3 +205,25 @@ dir = PATH "data"
;usr = "privatebin"
;pwd = "Z3r0P4ss"
;opt[12] = true ; PDO::ATTR_PERSISTENT

;[model]
; example of S3 configuration for Rados gateway / CEPH
;class = S3Storage
;[model_options]
;region = ""
;version = "2006-03-01"
;endpoint = "https://s3.my-ceph.invalid"
;use_path_style_endpoint = true
;bucket = "my-bucket"
;accesskey = "my-rados-user"
;secretkey = "my-rados-pass"

;[model]
; example of S3 configuration for AWS
;class = S3Storage
;[model_options]
;region = "eu-central-1"
;version = "latest"
;bucket = "my-bucket"
;accesskey = "access key id"
;secretkey = "secret access key"
@ -30,7 +30,8 @@
"mlocati/ip-lib" : "1.18.0"
},
"suggest" : {
"google/cloud-storage" : "1.26.1",
"aws/aws-sdk-php" : "3.239.0"
},
"require-dev" : {
"phpunit/phpunit" : "^9"
@ -157,6 +157,22 @@ class Configuration
            'prefix' => 'pastes',
            'uniformacl' => false,
        );
    } elseif (
        $section == 'model_options' && in_array(
            $this->_configuration['model']['class'],
            array('S3Storage')
        )
    ) {
        $values = array(
            'region' => null,
            'version' => null,
            'endpoint' => null,
            'accesskey' => null,
            'secretkey' => null,
            'use_path_style_endpoint' => null,
            'bucket' => null,
            'prefix' => '',
        );
    }

    // "*_options" sections don't require all defaults to be set
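To illustrate how these defaults are consumed, here is a minimal sketch (not part of this commit) of handing a parsed `[model_options]` section to the new backend through its `getInstance()` factory; the option values are taken from the CEPH example above:

```
<?php
use PrivateBin\Data\S3Storage;

// hypothetical options array, as it would result from parsing the
// [model_options] section of cfg/conf.php
$options = array(
    'region'                  => '',
    'version'                 => '2006-03-01',
    'endpoint'                => 'https://s3.my-ceph.invalid',
    'use_path_style_endpoint' => true,
    'bucket'                  => 'my-bucket',
    'prefix'                  => 'privatebin',
    'accesskey'               => 'my-rados-user',
    'secretkey'               => 'my-rados-pass',
);

// getInstance() maps accesskey/secretkey to AWS credentials, remembers
// bucket and prefix, and lazily creates the underlying Aws\S3\S3Client
$storage = S3Storage::getInstance($options);
```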
lib/Data/S3Storage.php (new file, 464 lines)
@ -0,0 +1,464 @@
<?php
/**
 * S3.php
 *
 * an S3 compatible data backend for PrivateBin with CEPH/RadosGW in mind
 * see https://docs.ceph.com/en/latest/radosgw/s3/php/
 * based on lib/Data/GoogleCloudStorage.php from PrivateBin version 1.4.0
 *
 * @link https://github.com/PrivateBin/PrivateBin
 * @copyright 2022 Felix J. Ogris (https://ogris.de/)
 * @license https://www.opensource.org/licenses/zlib-license.php The zlib/libpng License
 * @version 1.4.1
 *
 * Installation:
 * 1. Make sure you have composer.lock and composer.json in the document root of your PasteBin
 * 2. If not, grab a copy from https://github.com/PrivateBin/PrivateBin
 * 3. As non-root user, install the AWS SDK for PHP:
 *    composer require aws/aws-sdk-php
 *    (On FreeBSD, install devel/php-composer2 prior, e.g.: make -C /usr/ports/devel/php-composer2 install clean)
 * 4. In cfg/conf.php, comment out all [model] and [model_options] settings
 * 5. Still in cfg/conf.php, add a new [model] section:
 *    [model]
 *    class = S3Storage
 * 6. Add a new [model_options] as well, e.g. for a Rados gateway as part of your CEPH cluster:
 *    [model_options]
 *    region = ""
 *    version = "2006-03-01"
 *    endpoint = "https://s3.my-ceph.invalid"
 *    use_path_style_endpoint = true
 *    bucket = "my-bucket"
 *    prefix = "privatebin" (place all PrivateBin data beneath this prefix)
 *    accesskey = "my-rados-user"
 *    secretkey = "my-rados-pass"
 */

namespace PrivateBin\Data;

use Aws\S3\Exception\S3Exception;
use Aws\S3\S3Client;
use PrivateBin\Json;

class S3Storage extends AbstractData
{
    /**
     * S3 client
     *
     * @access private
     * @static
     * @var S3Client
     */
    private static $_client = null;

    /**
     * S3 client options
     *
     * @access private
     * @static
     * @var array
     */
    private static $_options = array();

    /**
     * S3 bucket
     *
     * @access private
     * @static
     * @var string
     */
    private static $_bucket = null;

    /**
     * S3 prefix for all PrivateBin data in this bucket
     *
     * @access private
     * @static
     * @var string
     */
    private static $_prefix = '';

    /**
     * returns an S3 data backend.
     *
     * @access public
     * @static
     * @param array $options
     * @return S3Storage
     */
    public static function getInstance(array $options)
    {
        // if needed initialize the singleton
        if (!(self::$_instance instanceof self)) {
            self::$_instance = new self;
        }

        self::$_options = array();
        self::$_options['credentials'] = array();

        if (is_array($options) && array_key_exists('region', $options)) {
            self::$_options['region'] = $options['region'];
        }
        if (is_array($options) && array_key_exists('version', $options)) {
            self::$_options['version'] = $options['version'];
        }
        if (is_array($options) && array_key_exists('endpoint', $options)) {
            self::$_options['endpoint'] = $options['endpoint'];
        }
        if (is_array($options) && array_key_exists('accesskey', $options)) {
            self::$_options['credentials']['key'] = $options['accesskey'];
        }
        if (is_array($options) && array_key_exists('secretkey', $options)) {
            self::$_options['credentials']['secret'] = $options['secretkey'];
        }
        if (is_array($options) && array_key_exists('use_path_style_endpoint', $options)) {
            self::$_options['use_path_style_endpoint'] = filter_var($options['use_path_style_endpoint'], FILTER_VALIDATE_BOOLEAN);
        }
        if (is_array($options) && array_key_exists('bucket', $options)) {
            self::$_bucket = $options['bucket'];
        }
        if (is_array($options) && array_key_exists('prefix', $options)) {
            self::$_prefix = $options['prefix'];
        }

        if (empty(self::$_client)) {
            self::$_client = new S3Client(self::$_options);
        }

        return self::$_instance;
    }

    /**
     * returns all objects in the given prefix.
     *
     * @access private
     * @param $prefix string with prefix
     * @return array all objects in the given prefix
     */
    private function _listAllObjects($prefix)
    {
        $allObjects = array();
        $options = array(
            'Bucket' => self::$_bucket,
            'Prefix' => $prefix,
        );

        do {
            $objectsListResponse = self::$_client->listObjects($options);
            $objects = $objectsListResponse['Contents'] ?? array();
            foreach ($objects as $object) {
                $allObjects[] = $object;
                $options['Marker'] = $object['Key'];
            }
        } while ($objectsListResponse['IsTruncated']);

        return $allObjects;
    }

    /**
     * returns the S3 storage object key for $pasteid in self::$_bucket.
     *
     * @access private
     * @param $pasteid string to get the key for
     * @return string
     */
    private function _getKey($pasteid)
    {
        if (self::$_prefix != '') {
            return self::$_prefix . '/' . $pasteid;
        }
        return $pasteid;
    }

    /**
     * Uploads the payload in the self::$_bucket under the specified key.
     * The entire payload is stored as a JSON document. The metadata is replicated
     * as the S3 object's metadata except for the fields attachment, attachmentname
     * and salt.
     *
     * @param $key string to store the payload under
     * @param $payload array to store
     * @return bool true if successful, otherwise false.
     */
    private function _upload($key, $payload)
    {
        $metadata = array_key_exists('meta', $payload) ? $payload['meta'] : array();
        unset($metadata['attachment'], $metadata['attachmentname'], $metadata['salt']);
        foreach ($metadata as $k => $v) {
            $metadata[$k] = strval($v);
        }
        try {
            self::$_client->putObject(array(
                'Bucket' => self::$_bucket,
                'Key' => $key,
                'Body' => Json::encode($payload),
                'ContentType' => 'application/json',
                'Metadata' => $metadata,
            ));
        } catch (S3Exception $e) {
            error_log('failed to upload ' . $key . ' to ' . self::$_bucket . ', ' .
                trim(preg_replace('/\s\s+/', ' ', $e->getMessage())));
            return false;
        }
        return true;
    }

    /**
     * @inheritDoc
     */
    public function create($pasteid, array $paste)
    {
        if ($this->exists($pasteid)) {
            return false;
        }

        return $this->_upload($this->_getKey($pasteid), $paste);
    }

    /**
     * @inheritDoc
     */
    public function read($pasteid)
    {
        try {
            $object = self::$_client->getObject(array(
                'Bucket' => self::$_bucket,
                'Key' => $this->_getKey($pasteid),
            ));
            $data = $object['Body']->getContents();
            return Json::decode($data);
        } catch (S3Exception $e) {
            error_log('failed to read ' . $pasteid . ' from ' . self::$_bucket . ', ' .
                trim(preg_replace('/\s\s+/', ' ', $e->getMessage())));
            return false;
        }
    }

    /**
     * @inheritDoc
     */
    public function delete($pasteid)
    {
        $name = $this->_getKey($pasteid);

        try {
            $comments = $this->_listAllObjects($name . '/discussion/');
            foreach ($comments as $comment) {
                try {
                    self::$_client->deleteObject(array(
                        'Bucket' => self::$_bucket,
                        'Key' => $comment['Key'],
                    ));
                } catch (S3Exception $e) {
                    // ignore if already deleted.
                }
            }
        } catch (S3Exception $e) {
            // there are no discussions associated with the paste
        }

        try {
            self::$_client->deleteObject(array(
                'Bucket' => self::$_bucket,
                'Key' => $name,
            ));
        } catch (S3Exception $e) {
            // ignore if already deleted
        }
    }

    /**
     * @inheritDoc
     */
    public function exists($pasteid)
    {
        return self::$_client->doesObjectExistV2(self::$_bucket, $this->_getKey($pasteid));
    }

    /**
     * @inheritDoc
     */
    public function createComment($pasteid, $parentid, $commentid, array $comment)
    {
        if ($this->existsComment($pasteid, $parentid, $commentid)) {
            return false;
        }
        $key = $this->_getKey($pasteid) . '/discussion/' . $parentid . '/' . $commentid;
        return $this->_upload($key, $comment);
    }

    /**
     * @inheritDoc
     */
    public function readComments($pasteid)
    {
        $comments = array();
        $prefix = $this->_getKey($pasteid) . '/discussion/';
        try {
            $entries = $this->_listAllObjects($prefix);
            foreach ($entries as $entry) {
                $object = self::$_client->getObject(array(
                    'Bucket' => self::$_bucket,
                    'Key' => $entry['Key'],
                ));
                $body = JSON::decode($object['Body']->getContents());
                $items = explode('/', $entry['Key']);
                $body['id'] = $items[3];
                $body['parentid'] = $items[2];
                $slot = $this->getOpenSlot($comments, (int) $object['Metadata']['created']);
                $comments[$slot] = $body;
            }
        } catch (S3Exception $e) {
            // no comments found
        }
        return $comments;
    }

    /**
     * @inheritDoc
     */
    public function existsComment($pasteid, $parentid, $commentid)
    {
        $name = $this->_getKey($pasteid) . '/discussion/' . $parentid . '/' . $commentid;
        return self::$_client->doesObjectExistV2(self::$_bucket, $name);
    }

    /**
     * @inheritDoc
     */
    public function purgeValues($namespace, $time)
    {
        $path = self::$_prefix;
        if ($path != '') {
            $path .= '/';
        }
        $path .= 'config/' . $namespace;

        try {
            foreach ($this->_listAllObjects($path) as $object) {
                $name = $object['Key'];
                if (strlen($name) > strlen($path) && substr($name, strlen($path), 1) !== '/') {
                    continue;
                }
                $head = self::$_client->headObject(array(
                    'Bucket' => self::$_bucket,
                    'Key' => $name,
                ));
                if (array_key_exists('Metadata', $head) && array_key_exists('value', $head['Metadata'])) {
                    $value = $head['Metadata']['value'];
                    if (is_numeric($value) && intval($value) < $time) {
                        try {
                            self::$_client->deleteObject(array(
                                'Bucket' => self::$_bucket,
                                'Key' => $name,
                            ));
                        } catch (S3Exception $e) {
                            // deleted by another instance.
                        }
                    }
                }
            }
        } catch (S3Exception $e) {
            // no objects in the bucket yet
        }
    }

    /**
     * For S3, the value will also be stored in the metadata for the
     * namespaces traffic_limiter and purge_limiter.
     * @inheritDoc
     */
    public function setValue($value, $namespace, $key = '')
    {
        $prefix = self::$_prefix;
        if ($prefix != '') {
            $prefix .= '/';
        }

        if ($key === '') {
            $key = $prefix . 'config/' . $namespace;
        } else {
            $key = $prefix . 'config/' . $namespace . '/' . $key;
        }

        $metadata = array('namespace' => $namespace);
        if ($namespace != 'salt') {
            $metadata['value'] = strval($value);
        }
        try {
            self::$_client->putObject(array(
                'Bucket' => self::$_bucket,
                'Key' => $key,
                'Body' => $value,
                'ContentType' => 'application/json',
                'Metadata' => $metadata,
            ));
        } catch (S3Exception $e) {
            error_log('failed to set key ' . $key . ' to ' . self::$_bucket . ', ' .
                trim(preg_replace('/\s\s+/', ' ', $e->getMessage())));
            return false;
        }
        return true;
    }

    /**
     * @inheritDoc
     */
    public function getValue($namespace, $key = '')
    {
        $prefix = self::$_prefix;
        if ($prefix != '') {
            $prefix .= '/';
        }

        if ($key === '') {
            $key = $prefix . 'config/' . $namespace;
        } else {
            $key = $prefix . 'config/' . $namespace . '/' . $key;
        }

        try {
            $object = self::$_client->getObject(array(
                'Bucket' => self::$_bucket,
                'Key' => $key,
            ));
            return $object['Body']->getContents();
        } catch (S3Exception $e) {
            return '';
        }
    }

    /**
     * @inheritDoc
     */
    protected function _getExpiredPastes($batchsize)
    {
        $expired = array();
        $now = time();
        $prefix = self::$_prefix;
        if ($prefix != '') {
            $prefix .= '/';
        }

        try {
            foreach ($this->_listAllObjects($prefix) as $object) {
                $head = self::$_client->headObject(array(
                    'Bucket' => self::$_bucket,
                    'Key' => $object['Key'],
                ));
                if (array_key_exists('Metadata', $head) && array_key_exists('expire_date', $head['Metadata'])) {
                    $expire_at = intval($head['Metadata']['expire_date']);
                    if ($expire_at != 0 && $expire_at < $now) {
                        array_push($expired, $object['Key']);
                    }
                }

                if (count($expired) > $batchsize) {
                    break;
                }
            }
        } catch (S3Exception $e) {
            // no objects in the bucket yet
        }
        return $expired;
    }
}