s3: Use better retry logic.
[hcoop/scripts.git] / s3.common
# -*- Shell-Script -*-
# Common functions for dealing with Amazon S3.

# Bandwidth cap for uploads, in KB/s.
BWLIMIT=325
# Chunk size for splitting large files, in MB.
CHUNKSIZE=5000

BUCKET=hcoop.net-backups
BACKUPDIR=full
S3CMD=$(dirname "$0")/s3

# Split words only on newlines, so that unquoted expansions of
# filenames containing spaces stay intact.
IFS=$'\n'

export S3_ACCESS_KEY_ID=$(cat ~mwolson_admin/.amazon/access.key)
export S3_SECRET_ACCESS_KEY=$(cat ~mwolson_admin/.amazon/secret.key)

function s3_cmd () {
    # $1: command (get|put|ls|rm)
    # $2: remote file
    # $3: local file
    local cmd=$1
    shift
    # Only uploads are bandwidth-limited.
    local bwarg
    if test "$cmd" = "put"; then
        bwarg="${BWLIMIT}K"
    else
        bwarg=
    fi
    $S3CMD $cmd $BUCKET "$1" "$2" $bwarg
}
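
# Illustrative usage (an example, not part of the original script):
#   s3_cmd put full/2008.01.01/home.tar.gz.00 home.tar.gz.00
# runs roughly:
#   $S3CMD put hcoop.net-backups full/2008.01.01/home.tar.gz.00 home.tar.gz.00 325K
# with the 325K bandwidth cap appended only because the command is "put".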

function move_over () {
    # Move a file to its offsite destination, splitting it into
    # CHUNKSIZE pieces and uploading each one.
    # Expects the file contents to come from STDIN.
    # $1: date subdirectory
    # $2: filename
    if test -z "$2" || test -n "$3"; then
        echo "move_over: expected exactly two arguments" >&2
        exit 1
    fi
    local subdir=$1
    local file=$2
    local dest=$BACKUPDIR/$subdir
    local ret
    split -d -b ${CHUNKSIZE}m - ${file}.
    for i in ${file}.*; do
        echo "Transferring $i to S3 ..."
        # Retry the upload until it succeeds, so a transient S3 failure
        # does not silently lose the chunk; delete the chunk only once
        # its transfer has gone through.
        until s3_cmd put $dest/$i $i; do
            ret=$?
            echo "Transfer of $i failed with status $ret; retrying ..." >&2
            sleep 30  # arbitrary back-off before retrying
        done
        rm -f $i
    done
}
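
# Illustrative usage (an example, not part of the original script; the
# date-directory format is assumed): stream a tarball from STDIN and
# ship it in CHUNKSIZE pieces, e.g.
#   tar -C / -cz home | move_over 2008.01.01 home.tar.gz
# which uploads home.tar.gz.00, home.tar.gz.01, ... under full/2008.01.01/.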

function prune_old_backups () {
    # Remove local copies of backups that are more than a week old,
    # then re-create the subdirectory for the upcoming backup.
    # TODO: chunks already uploaded to S3 are not pruned here.
    # $1: date subdirectory
    local subdir=$1
    local oldpwd=$PWD
    cd $BACKUPDIR
    find . -mindepth 1 -maxdepth 1 -type d -ctime +7 \
        -execdir rm -fr '{}' \; || true
    rm -rf $subdir
    mkdir -p $subdir
    cd $oldpwd
}
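
# Illustrative usage (an example, not part of the original script):
#   prune_old_backups 2008.01.01
# deletes week-old directories under $BACKUPDIR and leaves an empty
# $BACKUPDIR/2008.01.01/ ready for the next run.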