#!/bin/bash
# basic amazon s3 operations
# Licensed under the terms of the GNU GPL v2
# Copyright 2007 Victor Lowther <victor.lowther@gmail.com>

CURL=/home/mwolson_admin/bin/curl
HMAC=$(dirname "$0")/s3-hmac
ATTEMPTS=3

# print a message to stderr and bail
die() {
  echo "$*" >&2
  exit 1
}

# check to see if the variable name passed exists and holds a value.
# Die if it does not.
check_or_die() {
  [[ ${!1} ]] || die "Environment variable ${1} is not set."
}

# check to see if we have all the needed S3 variables defined.
# Bail if we do not.
check_s3() {
  local sak x
  for x in S3_ACCESS_KEY_ID S3_SECRET_ACCESS_KEY; do
    check_or_die "${x}"
  done
  [[ -f ${S3_SECRET_ACCESS_KEY} ]] || die "S3_SECRET_ACCESS_KEY must point to a file!"
  sak="$(wc -c "${S3_SECRET_ACCESS_KEY}")"
  (( ${sak%%[!0-9 ]*} == 40 )) || \
    die "S3 Secret Access Key is not exactly 40 bytes long. Please fix it."
}
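
# Example environment setup (hypothetical key ID and path) satisfying
# check_s3: the key ID lives in the environment, and the secret key sits
# alone in a file that is exactly 40 bytes long (no trailing newline):
#   export S3_ACCESS_KEY_ID=AKIAIOSFODNN7EXAMPLE
#   export S3_SECRET_ACCESS_KEY=/home/mwolson_admin/.s3/secret-key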

# check to see if our external dependencies exist
check_dep() {
  local res=0
  while [[ $# -ne 0 ]]; do
    which "${1}" >& /dev/null || { res=1; echo "${1} not found."; }
    shift
  done
  (( res == 0 )) || die "aborting."
}

check_hmac() {
  if [[ ! -f ${HMAC} || ! -x ${HMAC} ]]; then
    die "hmac script not found or not executable."
  fi
}
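
# The companion s3-hmac script is not shown here; it is invoked as
# "s3-hmac sha1 <keyfile>" and is assumed to read data on stdin and write
# a binary HMAC-SHA1 digest on stdout, roughly equivalent to:
#   openssl dgst -sha1 -hmac "$(cat "${keyfile}")" -binary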

check_deps() {
  check_dep openssl date cat grep
  check_hmac
  check_s3
}

urlenc() {
  # $1 = string to url encode
  # output is on stdout
  # we don't urlencode everything, just enough stuff.
  # the last rule encodes literal tab characters as %09 (GNU sed \t).
  echo -n "${1}" |
  sed 's/%/%25/g
       s/ /%20/g
       s/#/%23/g
       s/\$/%24/g
       s/\&/%26/g
       s/+/%2b/g
       s/,/%2c/g
       s/:/%3a/g
       s/;/%3b/g
       s/?/%3f/g
       s/@/%40/g
       s/\t/%09/g'
}

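# For example (output shown in the comment):
#   urlenc "backups/May 2007 #1.tar"   # -> backups/May%202007%20%231.tar
# Slashes are deliberately left alone so S3 key paths stay intact.
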
xmldec() {
  # no parameters.
  # accept input on stdin, put it on stdout.
  # patches accepted to get more stuff
  # &amp; is decoded last so that input like &amp;lt; is not double-decoded.
  sed 's/\&quot;/\"/g
       s/\&lt;/</g
       s/\&gt;/>/g
       s/\&amp;/\&/g'
}

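# For example:
#   echo '&lt;Name&gt;docs &amp; data&lt;/Name&gt;' | xmldec
#   # -> <Name>docs & data</Name>
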
## basic S3 functionality. x-amz-header functionality is not implemented.
# make an S3 signature string, which will be output on stdout.
s3_signature_string() {
  # $1 = HTTP verb
  # $2 = date string, must be in UTC
  # $3 = bucket name, if any
  # $4 = resource path, if any
  # $5 = content md5, if any
  # $6 = content MIME type, if any
  # $7 = canonicalized headers, if any
  # signature string will be output on stdout
  local verr="Must pass a verb to s3_signature_string!"
  local verb="${1:?${verr}}"
  local bucket="${3}"
  local resource="${4}"
  local derr="Must pass a date to s3_signature_string!"
  local date="${2:?${derr}}"
  local mime="${6}"
  local md5="${5}"
  local headers="${7}"
  printf "%s\n%s\n%s\n%s\n%s%s%s" \
    "${verb}" "${md5}" "${mime}" "${date}" \
    "${headers}" "${bucket}" "${resource}" | \
    $HMAC sha1 "${S3_SECRET_ACCESS_KEY}" | openssl base64 -e -a
}

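# The string being signed follows the AWS (signature v2) layout: verb,
# content-md5, content-type and date on their own lines, then any
# canonicalized headers, then /bucket/resource. For a GET of key "x.tar"
# from bucket "mybucket" (hypothetical date) the signed payload would be:
#   GET\n\n\nWed, 28 Mar 2007 01:29:59 +0000\n/mybucket/x.tar
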
# cheesy, but it is the best way to have multiple headers.
curl_headers() {
  # each arg passed will be output on its own line
  while (( $# )); do
    echo "header = \"${1}\""
    shift
  done
}

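# Each output line is a curl config directive; s3_curl hands them to curl
# via -K and process substitution. For example:
#   curl_headers "Date: ${date}" "Content-MD5: ${md5}"
# prints:
#   header = "Date: ..."
#   header = "Content-MD5: ..."
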
s3_curl() {
  # invoke curl to do all the heavy HTTP lifting
  # $1 = method (one of GET, PUT, DELETE, or HEAD)
  # $2 = remote bucket.
  # $3 = remote name
  # $4 = local name.
  # $5 = bandwidth limit.
  local bucket remote date sig md5 arg inout headers tries ret stdopts
  # header handling is kinda fugly, but it works.
  bucket="${2:+/${2}}/" # slashify the bucket
  remote="$(urlenc "${3}")" # if you don't, strange things may happen.
  stdopts="--connect-timeout 10 --fail --silent"
  [[ $CURL_S3_DEBUG == true ]] && stdopts="${stdopts} --show-error"
  test -n "${5}" && stdopts="${stdopts} --limit-rate ${5}"
  case "${1}" in
    GET) arg="-o" inout="${4:--}" # stdout if no $4
      ;;
    PUT) [[ ${2} ]] || die "PUT can has bucket?"
      if [[ ! ${3} ]]; then
        arg="-X PUT"
        headers[${#headers[@]}]="Content-Length: 0"
      elif [[ -f ${4} ]]; then
        md5="$(openssl dgst -md5 -binary "${4}" | openssl base64 -e -a)"
        arg="-T" inout="${4}"
        headers[${#headers[@]}]="Expect: 100-continue"
      else
        die "Cannot upload non-existent file ${4}"
      fi
      ;;
    DELETE) arg="-X DELETE"
      ;;
    HEAD) arg="-I" ;;
    *) die "Unknown verb ${1}. It probably would not have worked anyways." ;;
  esac
  date="$(TZ=UTC date '+%a, %e %b %Y %H:%M:%S %z')"
  sig=$(s3_signature_string "${1}" "${date}" "${bucket}" "${remote}" "${md5}")

  headers[${#headers[@]}]="Authorization: AWS ${S3_ACCESS_KEY_ID}:${sig}"
  headers[${#headers[@]}]="Date: ${date}"
  [[ ${md5} ]] && headers[${#headers[@]}]="Content-MD5: ${md5}"
  # retry transient failures up to ATTEMPTS times; ${arg} is intentionally
  # unquoted since it may hold two words (e.g. "-X PUT").
  tries=0
  while test ${tries} -lt ${ATTEMPTS}; do
    tries=$((tries + 1))
    $CURL ${arg} "${inout}" ${stdopts} -K <(curl_headers "${headers[@]}") \
      "http://s3.amazonaws.com${bucket}${remote}"
    ret=$?
    test ${ret} -eq 0 && break
  done
  return ${ret}
}

s3_put() {
  # $1 = remote bucket to put it into
  # $2 = remote name to put
  # $3 = local file to put; defaults to $2 if omitted.
  # $4 = bandwidth limit.
  s3_curl PUT "${1}" "${2}" "${3:-${2}}" "${4}"
  return $?
}

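# For example (hypothetical bucket and file names):
#   s3_put mybucket backups/home.tar.gz /tmp/home.tar.gz 100k
# uploads /tmp/home.tar.gz as key "backups/home.tar.gz", with the
# transfer rate capped at 100 kB/s.
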
s3_get() {
  # $1 = bucket to get file from
  # $2 = remote file to get
  # $3 = local file to get into. Will be overwritten if it exists.
  #      If this contains a path, that path must exist before calling this.
  # $4 = bandwidth limit.
  s3_curl GET "${1}" "${2}" "${3:-${2}}" "${4}"
  return $?
}

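# For example (hypothetical names):
#   s3_get mybucket backups/home.tar.gz /tmp/restore.tar.gz
# With no third argument the local name defaults to the remote name.
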
s3_test() {
  # same args as s3_get, but uses the HEAD verb instead of the GET verb.
  s3_curl HEAD "${1}" "${2}" >/dev/null
  return $?
}

# Hideously ugly, but it works well enough.
s3_buckets() {
  s3_get | grep -o '<Name>[^>]*</Name>' | sed 's/<[^>]*>//g' | xmldec
  return $?
}

# this will only return the first thousand entries, alas
# Maybe some kind soul can fix this without writing an XML parser in bash?
# Also, fuller XML entity handling is still needed; xmldec only covers the basics.
s3_list() {
  # $1 = bucket to list
  [[ ${1} ]] || return 1
  s3_get "${1}" | grep -o '<Key>[^>]*</Key>' | sed 's/<[^>]*>//g' | xmldec
  return $?
}

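# For example:
#   s3_list mybucket    # prints one key per line, at most 1000 entries
# Calling it with no bucket name just returns 1.
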
s3_delete() {
  # $1 = bucket to delete from
  # $2 = item to delete
  s3_curl DELETE "${1}" "${2}"
  return $?
}

# because this uses s3_list, it suffers from the same flaws.
s3_rmrf() {
  # $1 = bucket to delete everything from
  s3_list "${1}" | while read -r f; do
    s3_delete "${1}" "${f}"
  done
}

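# Example invocations (hypothetical bucket and key names):
#   ./s3 put mybucket backups/home.tar.gz /tmp/home.tar.gz
#   ./s3 ls mybucket
#   ./s3 rm mybucket backups/home.tar.gz
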
check_deps
case $1 in
  put) shift; s3_put "$@" ;;
  get) shift; s3_get "$@" ;;
  rm) shift; s3_delete "$@" ;;
  ls) shift; s3_list "$@" ;;
  test) shift; s3_test "$@" ;;
  buckets) s3_buckets ;;
  rmrf) shift; s3_rmrf "$@" ;;
  *) die "Unknown command ${1}." ;;
esac