I recently discovered the benefits of autofs and struggled with some issues when mounting S3 buckets. Since I didn’t find anything similar, I wrote auto.s3, which can now use either FUSE s3fs or goofyfs.
auto.s3 uses the AWS CLI and jq to resolve a per-user mount space to /home/<user>/Remote/S3/<aws-profile>/<bucket>/**
with correct file and directory permissions.
The scripts currently run on my Ubuntu Bionic Beaver, but it should be possible to use them on other distributions with minimal work. For OSX – nah… pay me!
Please read the comments included in the files!
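For a quick impression of how it plugs together, here is the setup with a hypothetical AWS profile called acme for user ctang (adjust names and paths to your environment):

$ mkdir -p ~/.autofs/s3fs
$ ${EDITOR} ~/.autofs/s3fs/acme                  # single line: ACCESS_KEY:ACCESS_SECRET
$ sudo install -m 755 auto.s3 /etc/auto.s3
$ echo '/home/ctang/Remote/S3 /etc/auto.s3 --timeout=3000' | sudo tee -a /etc/auto.master
$ sudo systemctl restart autofs
$ cd ~/Remote/S3/acme/<bucket>                   # autofs mounts the bucket on demand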
/etc/auto.s3
#!/bin/bash
##
# AutoFS user-folder indirect Automounter for S3 using either FUSE goofyfs or s3fs (0.1)
#
# ----------------------------------------------------------------------------
# "THE FUCK-WARE LICENSE" (Revision 1):
# <mg@evolution515.net> wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return or have sex with me,
# or both.
# ----------------------------------------------------------------------------
#
# Requirements
# - AWS CLI installed
# - JQ installed
# - Either FUSE goofyfs or s3fs installed
#
# Usage
# - place a credentials file per AWS profile into the $S3FS_CONFIG directory using the s3fs passwd format (ACCESS_KEY:ACCESS_SECRET), see example below
# - place this file to /etc/auto.s3 and make it executable
# - add to /etc/auto.master: /home/<user>/Remote/S3 /etc/auto.s3 --timeout=3000
# - choose the backend via the config section in this file (NOTE: goofyfs requires the goofyfs-fuse wrapper, see below)
# - cd <mountpoint>/<aws-profile>/<bucket>
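#
# Example credentials file (hypothetical profile "acme", AWS's documented example keys):
#   $ cat ~/.autofs/s3fs/acme
#   AKIAIOSFODNN7EXAMPLE:wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY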
#
# Debugging
# - Stop system service by:
# systemctl stop autofs
# - Execute as process (use --debug to see mount commands)
# automount -f -v
#
# Clean up mountpoints (when autofs hangs or mountpoints are still used)
# mount | grep autofs | cut -d' ' -f 3 | xargs umount -l
#
# Logging
# - Logs go to syslog unless you are running automount within a TTY
#
# Notes
# - goofyfs sometimes makes trouble - use s3fs!
# - The daemon needs to run as root since only root has access to all mount options
# - Additional entries can be defined with the -Dvariable=Value map-option to automount(8).
# - Alternative fuse style mount can be done by -fstype=fuse,allow_other :sshfs\#user@example.com\:/path/to/mount
# - We do not read out .aws/config since not all credentials necessarily have S3 access
# - https://github.com/kahing/goofys/pull/91/commits/07dffdbda4ff7fc3c538cb07e58ad12cc464b628
# - goofyfs catfs cache is not activated by default
# - chown/chmod is not that nice but works ;)
# - other backends not planned at the moment
#
# AWS Commands
# - aws s3api list-buckets
# - aws s3api list-objects --bucket <bucket>
#
# FAQ
# - https://github.com/s3fs-fuse/s3fs-fuse/wiki/FAQ
#
# Autofs provides additional variables that are set based on the user requesting the mount:
#
# USER The user login name
# UID The user login ID
# GROUP The user group name
# GID The user group ID
# HOME The user home directory
# HOST Hostname (uname -n)
#
# From exports
#
# AUTOFS_GID="1000"
# AUTOFS_GROUP="ctang"
# AUTOFS_HOME="/home/ctang"
# AUTOFS_SHOST="refpad-16"
# AUTOFS_UID="1000"
# AUTOFS_USER="ctang"
#
# Strict mode
set -euo pipefail -o errtrace
# Config
S3FS_CONFIG="${AUTOFS_HOME:-$HOME}/.autofs/s3fs" # user directory
BACKEND="goofyfs" # s3fs|goofyfs - NOTE: goofyfs requires goofyfs-fuse!
DEBUG=0 # 0|1 where 1 is on - output will go to syslog or journald
UMASK="750" # Umask for mountpoint placeholder directories
OPTS="defaults,noatime" # mount options
if [[ -z "${GID:-}" ]]; then
GID="$(id -g)"
fi
# We ensure every command output can be parsed in neutral form
export LC_ALL=C
export AWS_SDK_LOAD_CONFIG=0
# Const
PWD="$(pwd)"
SCRIPT_NAME="$(basename "$0")"
LOGGER_CMD="logger -i -t ${SCRIPT_NAME}"
if test -t 1; then
# if tty
LOGGER_CMD="${LOGGER_CMD} --no-act --stderr"
fi
PROFILES=()
if ! which jq 1>/dev/null 2>&1; then
$LOGGER_CMD "Cannot find jq binary"
exit 1
fi
if ! which aws 1>/dev/null 2>&1; then
$LOGGER_CMD "Cannot find aws binary"
exit 1
fi
# If the user is already inside a mount point this script will be called by root
# so we need to remap some stuff
if [[ ! "${HOME:-}" == "${PWD}/"* ]] && [[ "${PWD}" =~ ^(/home/[^/]+) ]]; then
S3FS_CONFIG=${S3FS_CONFIG/${AUTOFS_HOME:-$HOME}/${BASH_REMATCH[1]}}
HOME="${BASH_REMATCH[1]}"
USER="${HOME##*/}"
AUTOFS_UID="$(id -u ${USER})"
AUTOFS_GID="$(id -g ${USER})"
$LOGGER_CMD "Initializing. Remapping home to ${HOME}, user=${USER}, config=${S3FS_CONFIG}"
fi
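# Example (assumed setup): a lookup below /home/ctang/Remote/S3 runs with
# PWD=/home/ctang/Remote/S3 and HOME=/root (or unset), so the block above
# remaps HOME=/home/ctang, USER=ctang and S3FS_CONFIG=/home/ctang/.autofs/s3fs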
# Prevent errors
if [[ ! -d ${S3FS_CONFIG} ]]; then
$LOGGER_CMD "Config directory ${S3FS_CONFIG} not found."
exit 1
fi
# Mountpoint needs to be owned by user
chown -R ${AUTOFS_UID:-$UID}:${AUTOFS_GID:-$GID} "${S3FS_CONFIG}"
chmod -R 700 "${S3FS_CONFIG}"
# Create indirect mount points for s3 profiles
PROFILES=($(ls -1 ${S3FS_CONFIG}))
if [[ -z "${PROFILES[*]}" ]]; then
$LOGGER_CMD "No profiles found within ${S3FS_CONFIG}"
else
for profile in "${PROFILES[@]}"; do
chmod 600 ${S3FS_CONFIG}/${profile}
if [[ ! -d "${PWD}/${profile}" ]]; then
$LOGGER_CMD "Creating ${PWD}/${profile}"
mkdir -p "${PWD}/${profile}" > /dev/null 2>&1 || true
chmod ${UMASK} "${PWD}/${profile}"
chown ${AUTOFS_UID:-$UID}:${AUTOFS_GID:-$GID} "${PWD}/${profile}"
fi
done
fi
# Requested profile
PROFILE="${1:-}"
if [[ ! -e "${S3FS_CONFIG}/${PROFILE}" ]]; then
$LOGGER_CMD "No valid profile=${PROFILE} given! "
exit 1
fi
$LOGGER_CMD "Profile: $@"
if [[ -z "${PROFILE}" ]]; then
$LOGGER_CMD "No profile given"
exit 1
fi
if [[ "${BACKEND}" == "s3fs" ]]; then
if ! which s3fs 1>/dev/null 2>&1; then
$LOGGER_CMD "Cannot find s3fs installation"
exit 1
fi
OPTS="-fstype=fuse.s3fs,uid=${AUTOFS_UID:-${UID}},gid=${AUTOFS_UID:-${GID}},umask=000,${OPTS},_netdev,allow_other,default_permissions,passwd_file=${S3FS_CONFIG}/${PROFILE},use_cache=$(mktemp -d)"
if [[ "$DEBUG" -eq 1 ]]; then
OPTS="${OPTS},dbglevel=info,curldbg"
fi
elif [[ "${BACKEND}" == "goofyfs" ]]; then
if ! which goofyfs-fuse 1>/dev/null 2>&1; then
$LOGGER_CMD "Cannot find goofyfs-fuse wrapper"
exit 1
fi
OPTS="-fstype=fuse.goofyfs-fuse,${OPTS},_netdev,nonempty,allow_other,passwd_file=${S3FS_CONFIG}/${PROFILE},--file-mode=0666,nls=utf8"
if [[ "${DEBUG}" -eq 1 ]]; then
OPTS="${OPTS},--debug_s3,--debug_fuse"
fi
else
$LOGGER_CMD "Unsupported backend ${BACKEND}"
exit 1
fi
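# The profile file holds a single ACCESS_KEY:ACCESS_SECRET line; the parameter
# expansions below split it at the colon into the AWS environment variables
# consumed by "aws s3api list-buckets"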
read -r -d '' CREDENTIALS < "${S3FS_CONFIG}/${PROFILE}" || true # read returns non-zero at EOF, ignore it
export AWS_ACCESS_KEY_ID="${CREDENTIALS%%:*}"
export AWS_SECRET_ACCESS_KEY="${CREDENTIALS##*:}"
BUCKETS=($(aws s3api list-buckets --output json | jq -r '.Buckets[].Name'))
printf "%s\n" "${BUCKETS[@]}" | awk -v "opts=${OPTS}" -F '|' -- '
BEGIN { ORS=""; first=1 }
{
if (first) { print opts; first = 0 }
bucket = $1
# Enclose mount dir and location in quotes
# Escape "$" and "&" in the location as they are special
gsub(/\$/, "\\$", bucket);
gsub(/\&/,"\\\\&",bucket)
print " \\\n\t \"/" bucket "\"", "\":" bucket "\""
# print " \\\n\t " bucket, ":"bucket
}
END { if (!first) print "\n"; else exit 1 }
'
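For reference, the map entry the script prints for a hypothetical profile with two buckets looks roughly like this with the s3fs backend (options shortened):

-fstype=fuse.s3fs,uid=1000,gid=1000,umask=000,defaults,noatime,... \
	 "/my-bucket" ":my-bucket" \
	 "/other-bucket" ":other-bucket"

automount parses this and mounts the requested bucket below /home/<user>/Remote/S3/<aws-profile>/.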
/usr/local/bin/goofyfs-fuse
#!/bin/bash
##
# GoofyFS - FUSE wrapper
#
# supports a passwd_file argument analogous to s3fs since goofyfs does not support a custom credentials file
# (see https://github.com/kahing/goofys/pull/91/commits/07dffdbda4ff7fc3c538cb07e58ad12cc464b628)
#
# usage: mount \
# -t fuse.goofyfs-fuse -o allow_other,--passwd_file=/home/ctang/.autofs/s3fs/<profile>,--file-mode=0666,nls=utf8,--debug_s3,--debug_fuse \
# <bucket> \
# <mountpoint>
# Strict mode
set -euo pipefail -o errtrace
ARGS=()
PASSWD_FILE=""
BUCKET=""
MOUNTPOINT=""
while (($#)); do
case "$1" in
-t)
shift 1
ARGS+=("-t" "fuse.goofyfs")
;;
-o)
shift 1
OPTS=()
for arg in $( echo "${1}" | tr ',' '\n' ); do
case "${arg}" in
--passwd_file=*)
PASSWD_FILE="${arg##*=}"
;;
--*)
ARGS+=($( echo "${arg}" | tr '=' '\n' ))
;;
*)
OPTS+=("${arg}")
esac
done
opts="$(printf "%b," ${OPTS[@]})"
ARGS+=("-o" "${opts%,}")
;;
*)
if [[ -z "${BUCKET}" ]]; then
BUCKET="$1"
elif [[ -z "${MOUNTPOINT}" ]]; then
MOUNTPOINT="$1"
else
ARGS+=("$1")
fi
;;
esac
shift 1
done
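# Example (hypothetical): -o allow_other,--passwd_file=/home/ctang/.autofs/s3fs/acme,--file-mode=0666
# yields PASSWD_FILE=/home/ctang/.autofs/s3fs/acme and appends
# "--file-mode 0666 -o allow_other" to ARGS, i.e. goofyfs long options are
# lifted out of the -o string while plain mount options stay behind -o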
ARGS+=("${BUCKET}" "${MOUNTPOINT}")
export AWS_SDK_LOAD_CONFIG=0
if [[ -n "${PASSWD_FILE}" ]]; then
read -r -d '' CREDENTIALS < "${PASSWD_FILE}" || true # read returns non-zero at EOF, ignore it
export AWS_ACCESS_KEY_ID="${CREDENTIALS%%:*}"
export AWS_SECRET_ACCESS_KEY="${CREDENTIALS##*:}"
fi
goofyfs "${ARGS[@]}"
This looks awesome.
Any chance that you deployed this as a Docker image?
No plan. Also, I now use rclone or the AWS CLI when I need access to S3 or other cloud storage.
You can make it into a container, but it would need access to the host user’s mount namespaces, or vice versa.