2 # basic amazon s3 operations
3 # Licensed under the terms of the GNU GPL v2
4 # Copyright 2007 Victor Lowther <victor.lowther@gmail.com>
# Locations of the external helper programs this script drives.
# NOTE(review): CURL is pinned to a user-specific absolute path — confirm
# this is intentional rather than relying on "curl" from $PATH.
CURL=/home/mwolson_admin/bin/curl
# The s3-hmac signing helper is expected to sit next to this script.
# BUGFIX: quote the command substitution and "$0" so script paths that
# contain spaces do not word-split.
HMAC="$(dirname "$0")/s3-hmac"
# NOTE(review): this region is a corrupted paste — listing line numbers are
# fused into the text and several commands/strings were split mid-line.
# The function headers for die(), the env-var check, check_s3() and
# check_dep() were lost entirely; restore this region from version control.
10 # print a message and bail
16 # check to see if the variable name passed exists and holds a value.
# Fragment: indirect expansion ${!1} tests the variable *named by* $1.
19 [[ ${!1} ]] || die
"Environment variable ${1} is not set."
22 # check to see if we have all the needed S3 variables defined.
26 for x
in S3_ACCESS_KEY_ID S3_SECRET_ACCESS_KEY
; do
# Fragment: the secret key must live in a file whose size is exactly 40
# bytes; the numeric prefix of `wc -c` output is parsed via ${sak%%[!0-9 ]*}.
29 [[ -f ${S3_SECRET_ACCESS_KEY} ]] || die
"S3_SECRET_ACCESS_KEY must point to a file!"
30 sak
="$(wc -c "${S3_SECRET_ACCESS_KEY}")"
31 (( ${sak%%[!0-9 ]*} == 40 )) || \
32 die
"S3 Secret Access Key is not exactly 40 bytes long. Please fix it."
34 # check to see if our external dependencies exist
# Fragment: dependency probe over "$@". NOTE(review): `which` is
# non-portable — prefer `command -v` when this block is restored.
37 while [[ $# -ne 0 ]]; do
38 which "${1}" >& /dev/null || { res=1; echo "${1} not found."; }
41 (( res == 0 )) || die "aborting.
"
# Fragment: the signing helper must exist and be executable.
# NOTE(review): $HMAC is unquoted here — breaks on paths with spaces.
45 if test ! -f $HMAC || test ! -x $HMAC; then
46 die "hmac
script not found or not executable.
"
51 check_dep openssl date cat grep
57 # $1 = string to url encode
59 # we don't urlencode everything, just enough stuff.
# NOTE(review): the body of urlenc() was lost entirely — it is still
# invoked by s3_curl below and must be restored.
77 # accept input on stdin, put it on stdout.
78 # patches accepted to get more stuff
# NOTE(review): the body of xmldec() was likewise lost; s3_buckets and
# s3_list pipe through it.
85 ## basic S3 functionality. x-amz-header functionality is not implemented.
# make an S3 signature string, which will be output on stdout.
s3_signature_string() {
    # $1 = HTTP verb (required)
    # $2 = date string, must be in UTC (required)
    # $3 = bucket name, if any
    # $4 = resource path, if any
    # $5 = content md5, if any
    # $6 = content MIME type, if any
    # $7 = canonicalized headers, if any
    # signature string will be output on stdout
    local verr="Must pass a verb to s3_signature_string!"
    # BUGFIX: ${1:?verr} printed the literal word "verr" when the verb was
    # missing; expand the variable so the prepared diagnostic is shown.
    local verb="${1:?${verr}}"
    local derr="Must pass a date to s3_signature_string!"
    local date="${2:?${derr}}"
    # NOTE(review): these locals were lost in the corrupted paste; restored
    # from the positional-parameter documentation above — confirm against VCS.
    local bucket="${3}"
    local resource="${4}"
    local md5="${5}"
    local mime="${6}"
    local headers="${7}"
    # AWS signature v2 StringToSign:
    #   VERB \n MD5 \n TYPE \n DATE \n [amz-headers]bucket+resource
    # signed with HMAC-SHA1 over the secret key file, then base64-encoded.
    printf "%s\n%s\n%s\n%s\n%s%s%s" \
        "${verb}" "${md5}" "${mime}" "${date}" \
        "${headers}" "${bucket}" "${resource}" | \
    $HMAC sha1 "${S3_SECRET_ACCESS_KEY}" | openssl base64 -e -a
}
111 # cheesy, but it is the best way to have multiple headers.
113 # each arg passed will be output on its own line
# NOTE(review): interior of curl_headers() — its `curl_headers() {`, loop
# and closing lines were lost in the corrupted paste. It emits one curl
# config-file line per argument, `header = "..."`, consumed below via
# `-K <(curl_headers ...)`. Corruption split the echoed string mid-line.
116 echo "header
= \"${1}\""
# NOTE(review): interior of s3_curl() — the `s3_curl() {` header, the
# `case "${1}" in` / `;;` / `esac` skeleton, the retry-counter init, the
# `ret=$?` capture, `done`, `return` and closing `}` were all lost in the
# corrupted paste; listing line numbers are fused into the text and several
# strings are split mid-line. Restore from version control.
122 # invoke curl to do all the heavy HTTP lifting
123 # $1 = method (one of GET, PUT, or DELETE. HEAD is not handled yet.)
124 # $2 = remote bucket.
127 # $5 = bandwidth limit.
128 local bucket remote date sig md5 arg inout headers tries ret
129 # header handling is kinda fugly, but it works.
130 bucket="${2:+/${2}}/" # slashify the bucket
131 remote="$
(urlenc
"${3}")" # if you don't, strange things may happen.
# NOTE(review): stdopts is not in the `local` list above — it leaks into
# the global scope.
132 stdopts="--connect-timeout 10 --fail --silent"
133 [[ $CURL_S3_DEBUG == true ]] && stdopts="${stdopts} --show-error --fail"
134 test -n "${5}" && stdopts="${stdopts} --limit-rate ${5}"
# Fragment: arms of the lost `case` on the HTTP verb ($1).
136 GET) arg="-o" inout="${4:--}" # stdout if no $4
138 PUT) [[ ${2} ]] || die "PUT can has bucket?
"
139 if [[ ! ${3} ]]; then
141 headers[${#headers[@]}]="Content-Length
: 0"
142 elif [[ -f ${4} ]]; then
143 md5="$
(openssl dgst
-md5 -binary "${4}"|openssl base64
-e -a)"
144 arg="-T" inout="${4}"
145 headers[${#headers[@]}]="Expect
: 100-continue"
147 die "Cannot
write non-existing
file ${4}"
150 DELETE) arg="-X DELETE
"
153 *) die "Unknown verb
${1}. It probably would not have worked anyways.
" ;;
# Fragment: request Date header built in UTC, then the v2 signature.
155 date="$
(TZ
=UTC
date '+%a, %e %b %Y %H:%M:%S %z')"
# NOTE(review): ${1} is unquoted here — word-splitting risk.
156 sig=$(s3_signature_string ${1} "${date}" "${bucket}" "${remote}" "${md5}")
158 headers
[${#headers[@]}]="Authorization: AWS ${S3_ACCESS_KEY_ID}:${sig}"
159 headers[${#headers[@]}]="Date
: ${date}"
160 [[ ${md5} ]] && headers[${#headers[@]}]="Content-MD5: ${md5}"
# Fragment: up-to-3-attempt retry loop. NOTE(review): `tries` is never
# initialized in the visible text (likely a lost line), and `expr` is
# archaic — $((tries + 1)) once restored.
162 while test $tries -lt 3; do
163 tries
=$
(expr $tries + 1)
# Headers are fed to curl as a generated config file via -K and process
# substitution (see curl_headers above).
164 $CURL ${arg} "${inout}" ${stdopts} -K <(curl_headers "${headers[@]}") \
165 "http
://s3.amazonaws.com
${bucket}${remote}"
# NOTE(review): the `ret=$?` capture preceding this test was lost.
167 test $ret -eq 0 && break;
# NOTE(review): bodies of the s3_put / s3_get / s3_test wrappers; their
# `name() {` / `}` lines were lost in the corrupted paste (the names are
# visible in the command dispatcher at the end of the file).
173 # $1 = remote bucket to put it into
174 # $2 = remote name to put
175 # $3 = file to put. This must be present if $2 is.
176 # $4 = bandwidth limit.
# s3_put fragment: upload; local file defaults to the remote name.
177 s3_curl PUT "${1}" "${2}" "${3:-${2}}" "${4}"
182 # $1 = bucket to get file from
183 # $2 = remote file to get
184 # $3 = local file to get into. Will be overwritten if it exists.
185 # If this contains a path, that path must exist before calling this.
186 # $4 = bandwidth limit.
# s3_get fragment: download; local name defaults to the remote name.
187 s3_curl GET "${1}" "${2}" "${3:-${2}}" "${4}"
192 # same args as s3_get, but uses the HEAD verb instead of the GET verb.
# s3_test fragment: existence probe — only the exit status matters.
193 s3_curl HEAD "${1}" "${2}" >/dev/null
197 # Hideously ugly, but it works well enough.
# s3_buckets fragment (header lost): lists bucket names by scraping <Name>
# elements from the service GET response; xmldec decodes XML entities.
199 s3_get |grep -o '<Name>[^>]*</Name>' |sed 's/<[^>]*>//g' |xmldec
203 # this will only return the first thousand entries, alas
204 # Maybe some kind soul can fix this without writing an XML parser in bash?
205 # Also need to add xml entity handling.
207 # $1 = bucket to list
# s3_list fragment (header lost); the empty-arg guard string below was
# split mid-line by the corruption.
208 [ "x
${1}" == "x
" ] && return 1
209 s3_get "${1}" |grep -o '<Key>[^>]*</Key>' |sed 's/<[^>]*>//g'| xmldec
214 # $1 = bucket to delete from
215 # $2 = item to delete
# s3_delete fragment (header lost).
216 s3_curl DELETE "${1}" "${2}"
220 # because this uses s3_list, it suffers from the same flaws.
222 # $1 = bucket to delete everything from
# s3_rmrf fragment (header and closing `done` lost). NOTE(review):
# s3_delete runs in a pipeline subshell here, so a failure cannot set any
# status visible outside the loop.
223 s3_list "${1}" | while read f; do
224 s3_delete "${1}" "${f}";
# NOTE(review): arms of the lost top-level `case "${1}" in` CLI dispatcher
# (its header and `esac` are missing). Each arm shifts off the subcommand
# and forwards the remaining arguments; corruption split the `"$@"`
# expansions and the final error string across lines.
230 put) shift; s3_put "$@
" ;;
231 get) shift; s3_get "$@
" ;;
232 rm) shift; s3_delete "$@
" ;;
233 ls) shift; s3_list "$@
" ;;
234 test) shift; s3_test "$@
" ;;
235 buckets) s3_buckets ;;
236 rmrf) shift; s3_rmrf "$@
" ;;
# Default arm: unknown subcommand aborts via die.
237 *) die "Unknown
command ${1}.
"