# Uses the command-not-found package zsh support
# as seen in http://www.porcheron.info/command-not-found-for-zsh/
# this is installed in Ubuntu
[[ -e /etc/zsh_command_not_found ]] && source /etc/zsh_command_not_found
# Arch Linux command-not-found support, you must have package pkgfile installed
# https://wiki.archlinux.org/index.php/Pkgfile#.22Command_not_found.22_hook
[[ -e /usr/share/doc/pkgfile/command-not-found.zsh ]] && source /usr/share/doc/pkgfile/command-not-found.zsh
# Fedora command-not-found support
if [ -f /usr/libexec/pk-command-not-found ]; then
command_not_found_handler () {
runcnf=1
retval=127
[ ! -S /var/run/dbus/system_bus_socket ] && runcnf=0
[ ! -x /usr/libexec/packagekitd ] && runcnf=0
if [ $runcnf -eq 1 ]
then
/usr/libexec/pk-command-not-found "$@"
retval=$?
fi
return $retval
}
fi
# Advanced Aliases.
# Use with caution
#
# ls, the common ones I use a lot shortened for rapid fire usage
alias l='ls -lFh' #size,show type,human readable
alias la='ls -lAFh' #long list,show almost all,show type,human readable
alias lr='ls -tRFh' #sorted by date,recursive,show type,human readable
alias lt='ls -ltFh' #long list,sorted by date,show type,human readable
alias ll='ls -l' #long list
alias ldot='ls -ld .*'
alias lS='ls -1FSsh'
alias lart='ls -1Fcart'
alias lrt='ls -1Fcrt'
alias zshrc='$EDITOR ~/.zshrc' # Quick access to the ~/.zshrc file
alias grep='grep --color'
alias sgrep='grep -R -n -H -C 5 --exclude-dir={.git,.svn,CVS} '
alias t='tail -f'
# Command line head / tail shortcuts
alias -g H='| head'
alias -g T='| tail'
alias -g G='| grep'
alias -g L="| less"
alias -g M="| most"
alias -g LL="2>&1 | less"
alias -g CA="2>&1 | cat -A"
alias -g NE="2> /dev/null"
alias -g NUL="> /dev/null 2>&1"
alias -g P="2>&1| pygmentize -l pytb"
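# For example, with the global aliases above, "ps aux G ssh L" expands to
# "ps aux | grep ssh | less", and "make NUL" runs "make > /dev/null 2>&1".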
alias dud='du -d 1 -h'
alias duf='du -sh *'
alias fd='find . -type d -name'
alias ff='find . -type f -name'
alias h='history'
alias hgrep="fc -El 0 | grep"
alias help='man'
alias p='ps -f'
alias sortnr='sort -n -r'
alias unexport='unset'
alias whereami=display_info
alias rm='rm -i'
alias cp='cp -i'
alias mv='mv -i'
# zsh can open files automatically based on their SUFFIX (suffix aliases) :)
autoload -Uz is-at-least
if is-at-least 4.2.0; then
# open browser on urls
_browser_fts=(htm html de org net com at cx nl se dk php)
for ft in $_browser_fts ; do alias -s $ft=$BROWSER ; done
_editor_fts=(cpp cxx cc c hh h inl asc txt TXT tex)
for ft in $_editor_fts ; do alias -s $ft=$EDITOR ; done
_image_fts=(jpg jpeg png gif mng tiff tif xpm)
for ft in $_image_fts ; do alias -s $ft=$XIVIEWER; done
_media_fts=(ape avi flv mkv mov mp3 mpeg mpg ogg ogm rm wav webm)
for ft in $_media_fts ; do alias -s $ft=mplayer ; done
#read documents
alias -s pdf=acroread
alias -s ps=gv
alias -s dvi=xdvi
alias -s chm=xchm
alias -s djvu=djview
#list whats inside packed file
alias -s zip="unzip -l"
alias -s rar="unrar l"
alias -s tar="tar tf"
alias -s tar.gz="echo "
alias -s ace="unace l"
fi
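# With the suffix aliases above, typing "notes.txt" at the prompt opens it in
# $EDITOR, "photo.png" opens in $XIVIEWER, and "archive.zip" runs "unzip -l archive.zip".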
# Make zsh know about hosts already accessed by SSH
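# The expression below reads the system-wide and per-user known_hosts files
# (and their *2 variants), keeps only the leading host field of each line, and
# splits comma-separated host aliases into individual completion candidates.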
zstyle -e ':completion:*:(ssh|scp|sftp|rsh|rsync):hosts' hosts 'reply=(${=${${(f)"$(cat {/etc/ssh_,~/.ssh/known_}hosts(|2)(N) /dev/null)"}%%[# ]*}//,/ })'
# ------------------------------------------------------------------------------
# FILE: compleat.plugin.zsh
# DESCRIPTION: oh-my-zsh plugin file.
# AUTHOR: Sorin Ionescu (sorin.ionescu@gmail.com)
# VERSION: 1.0.0
# ------------------------------------------------------------------------------
if (( ${+commands[compleat]} )); then
local prefix="${commands[compleat]:h:h}"
local setup="${prefix}/share/compleat-1.0/compleat_setup"
if [[ -f "$setup" ]]; then
if ! bashcompinit >/dev/null 2>&1; then
autoload -U bashcompinit
bashcompinit -i
fi
source "$setup"
fi
fi
# ------------------------------------------------------------------------------
# FILE: composer.plugin.zsh
# DESCRIPTION: oh-my-zsh composer plugin file.
# AUTHOR: Daniel Gomes (me@danielcsgomes.com)
# VERSION: 1.0.0
# ------------------------------------------------------------------------------
# Composer basic command completion
_composer_get_command_list () {
$_comp_command1 --no-ansi | sed "1,/Available commands/d" | awk '/^[ \t]*[a-z]+/ { print $1 }'
}
_composer_get_required_list () {
$_comp_command1 show -s --no-ansi | sed '1,/requires/d' | awk 'NF > 0 && !/^requires \(dev\)/{ print $1 }'
}
_composer () {
local curcontext="$curcontext" state line
typeset -A opt_args
_arguments \
'1: :->command'\
'*: :->args'
case $state in
command)
compadd $(_composer_get_command_list)
;;
*)
compadd $(_composer_get_required_list)
;;
esac
}
compdef _composer composer
compdef _composer composer.phar
# Aliases
alias c='composer'
alias csu='composer self-update'
alias cu='composer update'
alias cr='composer require'
alias ci='composer install'
alias ccp='composer create-project'
alias cdu='composer dump-autoload'
alias cgu='composer global update'
alias cgr='composer global require'
# install composer in the current directory
alias cget='curl -s https://getcomposer.org/installer | php'
# Add Composer's global binaries to PATH
export PATH=$PATH:~/.composer/vendor/bin
function copydir {
pwd | tr -d "\r\n" | pbcopy
}
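# Note: pbcopy/pbpaste ship with macOS. On Linux you could provide a stand-in
# before using copydir/copyfile, for example (assumes xclip is installed):
#   alias pbcopy='xclip -selection clipboard -in'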
function copyfile {
[[ "$#" != 1 ]] && return 1
local file_to_copy=$1
cat "$file_to_copy" | pbcopy
}
#Show progress while file is copying
# Rsync options are:
# -p - preserve permissions
# -o - preserve owner
# -g - preserve group
# -h - output in human-readable format
# --progress - display progress
# -b - instead of just overwriting an existing file, save the original
# --backup-dir=/tmp/rsync - move backup copies to "/tmp/rsync"
# -e /dev/null - only work on local files
# -- - everything after this is an argument, even if it looks like an option
alias cpv="rsync -poghb --backup-dir=/tmp/rsync -e /dev/null --progress --"
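# Example: "cpv big.iso /mnt/backup/" copies with a progress display; if the
# destination file already exists, the overwritten original is kept under /tmp/rsync.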
#compdef cpanm
##
# cpanminus Z Shell completion script
##
#
# Current supported cpanm version: 1.4000 (Tue Mar 8 01:00:49 PST 2011)
#
# The latest code is always located at:
# https://github.com/rshhh/cpanminus/blob/master/etc/_cpanm
#
local arguments curcontext="$curcontext"
typeset -A opt_args
arguments=(
# Commands
# '(--install -i)'{--install,-i}'[Installs the modules]'
'(- :)--self-upgrade[Upgrades itself]'
'(- :)--info[Displays distribution info on CPAN]'
'(--installdeps)--installdeps[Only install dependencies]'
'(--look)--look[Download/unpack the distribution and then open the directory with your shell]'
'(- :)'{--help,-h}'[Displays help information]'
'(- :)'{--version,-V}'[Displays software version]'
# Options
{--force,-f}'[Force install]'
{--notest,-n}'[Do not run unit tests]'
{--sudo,-S}'[sudo to run install commands]'
'(-v --verbose --quiet -q)'{--verbose,-v}'[Turns on chatty output]'
'(-q --quiet --verbose -v)'{--quiet,-q}'[Turns off all output]'
{--local-lib,-l}'[Specify the install base to install modules]'
{--local-lib-contained,-L}'[Specify the install base to install all non-core modules]'
'--mirror[Specify the base URL for the mirror (e.g. http://cpan.cpantesters.org/)]:URLs:_urls'
'--mirror-only[Use the mirror'\''s index file instead of the CPAN Meta DB]'
'--prompt[Prompt when configure/build/test fails]'
'--reinstall[Reinstall the distribution even if you already have the latest version installed]'
'--interactive[Turn on interactive configure]'
'--scandeps[Scan the dependencies of given modules and output the tree in a text format]'
'--format[Specify what format to display the scanned dependency tree]:scandeps format:(tree json yaml dists)'
'--save-dists[Specify the optional directory path to copy downloaded tarballs]'
# '--uninst-shadows[Uninstalls the shadow files of the distribution that you'\''re installing]'
'--auto-cleanup[Number of days that cpanm'\''s work directories expire in. Defaults to 7]'
'(--no-man-pages)--man-pages[Generates man pages for executables (man1) and libraries (man3)]'
'(--man-pages)--no-man-pages[Do not generate man pages]'
# Note: Normally with "--lwp", "--wget" and "--curl" options set to true (which is the default) cpanm tries LWP,
# Wget, cURL and HTTP::Tiny (in that order) and uses the first one available.
# (So that the exclusions are not enabled here for the completion)
'(--lwp)--lwp[Use LWP module to download stuff]'
'(--wget)--wget[Use GNU Wget (if available) to download stuff]'
'(--curl)--curl[Use cURL (if available) to download stuff]'
# Other completions
'*:Local directory or archive:_files -/ -g "*.(tar.gz|tgz|tar.bz2|zip)(-.)"'
# '*::args: _normal' # this looks for default files (any files)
)
_arguments -s $arguments \
&& return 0
return 1
# Authors:
# https://github.com/AlexBio
# https://github.com/dbb
# https://github.com/Mappleconfusers
#
# Debian-related zsh aliases and functions for zsh
# Use aptitude if installed, or apt-get if not.
# You can just set apt_pref='apt-get' to override it.
if [[ -e $( which -p aptitude 2>&1 ) ]]; then
apt_pref='aptitude'
apt_upgr='safe-upgrade'
else
apt_pref='apt-get'
apt_upgr='upgrade'
fi
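# To force apt-get even when aptitude is present, set the preference yourself
# after this plugin has loaded (the block above assigns it unconditionally):
#   apt_pref='apt-get'
#   apt_upgr='upgrade'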
# Use sudo by default if it's installed
if [[ -e $( which -p sudo 2>&1 ) ]]; then
use_sudo=1
fi
# Aliases ###################################################################
# These are for more obscure uses of apt-get and aptitude that aren't covered
# below.
alias age='apt-get'
alias api='aptitude'
# Some self-explanatory aliases
alias acs="apt-cache search"
alias aps='aptitude search'
alias as="aptitude -F \"* %p -> %d \n(%v/%V)\" \
--no-gui --disable-columns search" # search package
# apt-file
alias afs='apt-file search --regexp'
# These are apt-get only
alias asrc='apt-get source'
alias app='apt-cache policy'
# superuser operations ######################################################
if [[ $use_sudo -eq 1 ]]; then
# commands using sudo #######
alias aac='sudo $apt_pref autoclean'
alias abd='sudo $apt_pref build-dep'
alias ac='sudo $apt_pref clean'
alias ad='sudo $apt_pref update'
alias adg='sudo $apt_pref update && sudo $apt_pref $apt_upgr'
alias adu='sudo $apt_pref update && sudo $apt_pref dist-upgrade'
alias afu='sudo apt-file update'
alias ag='sudo $apt_pref $apt_upgr'
alias ai='sudo $apt_pref install'
# Install all packages given on the command line while using only the first word of each line:
# acs ... | ail
alias ail="sed -e 's/ */ /g' -e 's/ *//' | cut -s -d ' ' -f 1 | "' xargs sudo $apt_pref install'
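# e.g. "acs '^vim' | ail" installs every package returned by that apt-cache search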
alias ap='sudo $apt_pref purge'
alias ar='sudo $apt_pref remove'
# apt-get only
alias ads='sudo apt-get dselect-upgrade'
# Install all .deb files in the current directory.
# Warning: you will need to put the glob in single quotes if you use:
# glob_subst
alias dia='sudo dpkg -i ./*.deb'
alias di='sudo dpkg -i'
# Remove ALL kernel images and headers EXCEPT the one in use
alias kclean='sudo aptitude remove -P ?and(~i~nlinux-(ima|hea) \
?not(~n`uname -r`))'
# commands using su #########
else
alias aac='su -ls \'$apt_pref autoclean\' root'
abd() {
cmd="su -lc '$apt_pref build-dep $@' root"
print "$cmd"
eval "$cmd"
}
alias ac='su -ls \'$apt_pref clean\' root'
alias ad='su -lc \'$apt_pref update\' root'
alias adg='su -lc \'$apt_pref update && aptitude $apt_upgr\' root'
alias adu='su -lc \'$apt_pref update && aptitude dist-upgrade\' root'
alias afu='su -lc "apt-file update"'
alias ag='su -lc \'$apt_pref $apt_upgr\' root'
ai() {
cmd="su -lc 'aptitude -P install $@' root"
print "$cmd"
eval "$cmd"
}
ap() {
cmd="su -lc '$apt_pref -P purge $@' root"
print "$cmd"
eval "$cmd"
}
ar() {
cmd="su -lc '$apt_pref -P remove $@' root"
print "$cmd"
eval "$cmd"
}
# Install all .deb files in the current directory
# Assumes glob_subst is off
alias dia='su -lc "dpkg -i ./*.deb" root'
alias di='su -lc "dpkg -i" root'
# Remove ALL kernel images and headers EXCEPT the one in use
alias kclean='su -lc '\''aptitude remove -P ?and(~i~nlinux-(ima|hea) \
?not(~n`uname -r`))'\'' root'
fi
# Completion ################################################################
#
# Registers a compdef for $1 that calls $apt_pref with the commands $2
# To do that it creates a new completion function called _apt_pref_$2
#
apt_pref_compdef() {
local f fb
f="_apt_pref_${2}"
eval "function ${f}() {
shift words;
service=\"\$apt_pref\";
words=(\"\$apt_pref\" '$2' \$words);
((CURRENT++))
test \"\${apt_pref}\" = 'aptitude' && _aptitude || _apt
}"
compdef "$f" "$1"
}
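# For example, `apt_pref_compdef ai "install"` below defines _apt_pref_ai, which
# rewrites the completion words to "$apt_pref install ..." and then delegates to
# _aptitude or _apt, so `ai <TAB>` completes like `aptitude install <TAB>`.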
apt_pref_compdef aac "autoclean"
apt_pref_compdef abd "build-dep"
apt_pref_compdef ac "clean"
apt_pref_compdef ad "update"
apt_pref_compdef afu "update"
apt_pref_compdef ag "$apt_upgr"
apt_pref_compdef ai "install"
apt_pref_compdef ail "install"
apt_pref_compdef ap "purge"
apt_pref_compdef ar "remove"
apt_pref_compdef ads "dselect-upgrade"
# Misc. #####################################################################
# print all installed packages
alias allpkgs='aptitude search -F "%p" --disable-columns ~i'
# Create a basic .deb package
alias mydeb='time dpkg-buildpackage -rfakeroot -us -uc'
# Functions #################################################################
# create a simple script that can be used to 'duplicate' a system
apt-copy() {
print '#!/bin/sh'"\n" > apt-copy.sh
cmd='$apt_pref install'
for p in ${(f)"$(aptitude search -F "%p" --disable-columns \~i)"}; {
cmd="${cmd} ${p}"
}
print $cmd "\n" >> apt-copy.sh
chmod +x apt-copy.sh
}
# Prints apt history
# Usage:
# apt-history install
# apt-history upgrade
# apt-history remove
# apt-history rollback
# apt-history list
# Based On: http://linuxcommando.blogspot.com/2008/08/how-to-show-apt-log-history.html
apt-history () {
case "$1" in
install)
zgrep --no-filename 'install ' $(ls -rt /var/log/dpkg*)
;;
upgrade|remove)
zgrep --no-filename $1 $(ls -rt /var/log/dpkg*)
;;
rollback)
zgrep --no-filename upgrade $(ls -rt /var/log/dpkg*) | \
grep "$2" -A10000000 | \
grep "$3" -B10000000 | \
awk '{print $4"="$5}'
;;
list)
zcat $(ls -rt /var/log/dpkg*)
;;
*)
echo "Parameters:"
echo " install - Lists all packages that have been installed."
echo " upgrade - Lists all packages that have been upgraded."
echo " remove - Lists all packages that have been removed."
echo " rollback - Lists rollback information."
echo " list - Lists all contains of dpkg logs."
;;
esac
}
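# Example: "apt-history install | tail -20" shows the 20 most recent install
# entries from the dpkg logs.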
# Kernel-package building shortcut
kerndeb () {
# temporarily unset MAKEFLAGS ( '-j3' will fail )
MAKEFLAGS=$( print - $MAKEFLAGS | perl -pe 's/-j\s*[\d]+//g' )
print '$MAKEFLAGS set to '"'$MAKEFLAGS'"
appendage='-custom' # this shows up in $(uname -r)
revision=$(date +"%Y%m%d") # this shows up in the .deb file name
make-kpkg clean
time fakeroot make-kpkg --append-to-version "$appendage" --revision \
"$revision" kernel_image kernel_headers
}
# List packages by size
function apt-list-packages {
dpkg-query -W --showformat='${Installed-Size} ${Package} ${Status}\n' | \
grep -v deinstall | \
sort -n | \
awk '{print $1" "$2}'
}
# enables cycling through the directory stack using
# Ctrl+Shift+Left/Right
#
# left/right direction follows the order in which directories
# were visited, like left/right arrows do in a browser
# NO_PUSHD_MINUS syntax:
# pushd +N: start counting from left of `dirs' output
# pushd -N: start counting from right of `dirs' output
insert-cycledleft () {
emulate -L zsh
setopt nopushdminus
builtin pushd -q +1 &>/dev/null || true
zle reset-prompt
}
zle -N insert-cycledleft
insert-cycledright () {
emulate -L zsh
setopt nopushdminus
builtin pushd -q -0 &>/dev/null || true
zle reset-prompt
}
zle -N insert-cycledright
# add key bindings for iTerm2
if [[ "$TERM_PROGRAM" == "iTerm.app" ]]; then
bindkey "^[[1;6D" insert-cycledleft
bindkey "^[[1;6C" insert-cycledright
else
bindkey "\e[1;6D" insert-cycledleft
bindkey "\e[1;6C" insert-cycledright
fi
##
# Navigate directory history using ALT-LEFT and ALT-RIGHT. ALT-LEFT moves back to directories
# that the user has changed to in the past, and ALT-RIGHT undoes ALT-LEFT.
#
dirhistory_past=($PWD)
dirhistory_future=()
export dirhistory_past
export dirhistory_future
export DIRHISTORY_SIZE=30
# Pop the last element of dirhistory_past.
# Pass the name of the variable to return the result in.
# Returns the element if the array was not empty,
# otherwise returns empty string.
function pop_past() {
eval "$1='$dirhistory_past[$#dirhistory_past]'"
if [[ $#dirhistory_past -gt 0 ]]; then
dirhistory_past[$#dirhistory_past]=()
fi
}
function pop_future() {
eval "$1='$dirhistory_future[$#dirhistory_future]'"
if [[ $#dirhistory_future -gt 0 ]]; then
dirhistory_future[$#dirhistory_future]=()
fi
}
# Push a new element onto the end of dirhistory_past. If the size of the array
# is >= DIRHISTORY_SIZE, the array is shifted
function push_past() {
if [[ $#dirhistory_past -ge $DIRHISTORY_SIZE ]]; then
shift dirhistory_past
fi
if [[ $#dirhistory_past -eq 0 || $dirhistory_past[$#dirhistory_past] != "$1" ]]; then
dirhistory_past+=($1)
fi
}
function push_future() {
if [[ $#dirhistory_future -ge $DIRHISTORY_SIZE ]]; then
shift dirhistory_future
fi
if [[ $#dirhistory_future -eq 0 || $dirhistory_future[$#dirhistory_future] != "$1" ]]; then
dirhistory_future+=($1)
fi
}
# Called by zsh when directory changes
function chpwd() {
push_past $PWD
# If DIRHISTORY_CD is not set...
if [[ -z "${DIRHISTORY_CD+x}" ]]; then
# ... clear future.
dirhistory_future=()
fi
}
function dirhistory_cd(){
DIRHISTORY_CD="1"
cd $1
unset DIRHISTORY_CD
}
# Move backward in directory history
function dirhistory_back() {
local cw=""
local d=""
# Last element in dirhistory_past is the cwd.
pop_past cw
if [[ "" == "$cw" ]]; then
# Someone overwrote our variable. Recover it.
dirhistory_past=($PWD)
return
fi
pop_past d
if [[ "" != "$d" ]]; then
dirhistory_cd $d
push_future $cw
else
push_past $cw
fi
}
# Move forward in directory history
function dirhistory_forward() {
local d=""
pop_future d
if [[ "" != "$d" ]]; then
dirhistory_cd $d
push_past $d
fi
}
# Bind keys to history navigation
function dirhistory_zle_dirhistory_back() {
# Erase current line in buffer
zle kill-buffer
dirhistory_back
zle accept-line
}
function dirhistory_zle_dirhistory_future() {
# Erase current line in buffer
zle kill-buffer
dirhistory_forward
zle accept-line
}
zle -N dirhistory_zle_dirhistory_back
# xterm in normal mode
bindkey "\e[3D" dirhistory_zle_dirhistory_back
bindkey "\e[1;3D" dirhistory_zle_dirhistory_back
# Putty:
bindkey "\e\e[D" dirhistory_zle_dirhistory_back
# GNU screen:
bindkey "\eO3D" dirhistory_zle_dirhistory_back
zle -N dirhistory_zle_dirhistory_future
bindkey "\e[3C" dirhistory_zle_dirhistory_future
bindkey "\e[1;3C" dirhistory_zle_dirhistory_future
bindkey "\e\e[C" dirhistory_zle_dirhistory_future
bindkey "\eO3C" dirhistory_zle_dirhistory_future
# Save dirstack history to .zdirs
# adapted from:
# github.com/grml/grml-etc-core/blob/master/etc/zsh/zshrc#L1547
DIRSTACKSIZE=${DIRSTACKSIZE:-20}
dirstack_file=${dirstack_file:-${HOME}/.zdirs}
if [[ -f ${dirstack_file} ]] && [[ ${#dirstack[*]} -eq 0 ]] ; then
dirstack=( ${(f)"$(< $dirstack_file)"} )
# "cd -" won't work after login by just setting $OLDPWD, so
[[ -d $dirstack[1] ]] && cd $dirstack[1] && cd $OLDPWD
fi
chpwd() {
if (( $DIRSTACKSIZE <= 0 )) || [[ -z $dirstack_file ]]; then return; fi
local -ax my_stack
my_stack=( ${PWD} ${dirstack} )
builtin print -l ${(u)my_stack} >! ${dirstack_file}
}
#compdef manage.py
typeset -ga nul_args
nul_args=(
'--verbosity=-[verbosity level; 0=minimal output, 1=normal output, 2=all output.]:Verbosity:((0\:minimal 1\:normal 2\:all))'
'--settings=-[the Python path to a settings module.]:file:_files'
'--pythonpath=-[a directory to add to the Python path.]:directory:_directories'
'--traceback[print traceback on exception.]'
"--no-color[Don't colorize the command output.]"
"--version[show program's version number and exit.]"
{-h,--help}'[show this help message and exit.]'
)
typeset -ga start_args
start_args=(
'--template=-[The path or URL to load the template from.]:directory:_directories'
'--extension=-[The file extension(s) to render (default: "py").]'
'--name=-[The file name(s) to render.]:file:_files'
)
typeset -ga db_args
db_args=(
'--database=-[Nominates a database. Defaults to the "default" database.]'
)
typeset -ga noinput_args
noinput_args=(
'--noinput[tells Django to NOT prompt the user for input of any kind.]'
)
typeset -ga no_init_data_args
no_init_data_args=(
'--no-initial-data[Tells Django not to load any initial data after database synchronization.]'
)
typeset -ga tag_args
tag_args=(
'--tag=-[Run only checks labeled with given tag.]'
'--list-tags[List available tags.]'
)
_managepy-check(){
_arguments -s : \
$tag_args \
$nul_args && ret=0
}
_managepy-changepassword(){
_arguments -s : \
$db_args \
$nul_args && ret=0
}
_managepy-createcachetable(){
_arguments -s : \
$db_args \
$nul_args && ret=0
}
_managepy-createsuperuser(){
_arguments -s : \
'--username=-[Specifies the login for the superuser.]' \
'--email=-[Specifies the email for the superuser.]' \
$noinput_args \
$db_args \
$nul_args && ret=0
}
_managepy-collectstatic(){
_arguments -s : \
'--link[Create a symbolic link to each file instead of copying.]' \
'--no-post-process[Do NOT post process collected files.]' \
'--ignore=-[Ignore files or directories matching this glob-style pattern. Use multiple times to ignore more.]' \
'--dry-run[Do everything except modify the filesystem.]' \
'--clear[Clear the existing files using the storage before trying to copy or link the original file.]' \
'--link[Create a symbolic link to each file instead of copying.]' \
'--no-default-ignore[Do not ignore the common private glob-style patterns "CVS", ".*" and "*~".]' \
$noinput_args \
$nul_args && ret=0
}
_managepy-dbshell(){
_arguments -s : \
$db_args \
$nul_args && ret=0
}
_managepy-diffsettings(){
_arguments -s : \
"--all[Display all settings, regardless of their value.]"
$nul_args && ret=0
}
_managepy-dumpdata(){
_arguments -s : \
'--format=-[Specifies the output serialization format for fixtures.]:format:(json yaml xml)' \
'--indent=-[Specifies the indent level to use when pretty-printing output.]' \
'--exclude=-[An app_label or app_label.ModelName to exclude (use multiple --exclude to exclude multiple apps/models).]' \
'--natural-foreign[Use natural foreign keys if they are available.]' \
'--natural-primary[Use natural primary keys if they are available.]' \
"--all[Use Django's base manager to dump all models stored in the database.]" \
'--pks=-[Only dump objects with given primary keys.]' \
$db_args \
$nul_args \
'*::appname:_applist' && ret=0
}
_managepy-flush(){
_arguments -s : \
$no_init_data_args \
$db_args \
$noinput_args \
$nul_args && ret=0
}
_managepy-help(){
_arguments -s : \
'*:command:_managepy_cmds' \
$nul_args && ret=0
}
_managepy_cmds(){
local line
local -a cmd
_call_program help-command ./manage.py help \
|& sed -n '/^ /s/[(), ]/ /gp' \
| while read -A line; do cmd=($line $cmd); done
_describe -t managepy-command 'manage.py command' cmd
}
_managepy-inspectdb(){
_arguments -s : \
$db_args \
$nul_args && ret=0
}
_managepy-loaddata(){
_arguments -s : \
'--ignorenonexistent[Ignores entries in the serialized data for fields that do not currently exist on the model.]' \
'--app=-[Only look for fixtures in the specified app.]:appname:_applist' \
'*::file:_files' \
$db_args \
$nul_args && ret=0
}
_managepy-makemessages(){
_arguments -s : \
'--locale=-[Creates or updates the message files for the given locale(s) (e.g. pt_BR).]' \
'--domain=-[The domain of the message files (default: "django").]' \
'--all[Updates the message files for all existing locales.]' \
'--extension=-[The file extension(s) to examine (default: "html,txt", or "js" if the domain is "djangojs").]' \
'--symlinks[Follows symlinks to directories when examining source code and templates for translation strings.]' \
'--ignore=-[Ignore files or directories matching this glob-style pattern.]' \
"--no-default-ignore[Don't ignore the common glob-style patterns 'CVS', '.*', '*~' and '*.pyc'.]" \
"--no-wrap[Don't break long message lines into several lines.]" \
"--no-location[Don't write '#: filename:line' lines.]" \
'--no-obsolete[Remove obsolete message strings.]' \
'--keep-pot[Keep .pot file after making messages.]' \
$nul_args && ret=0
}
_managepy-makemigrations(){
_arguments -s : \
'--dry-run[Just show what migrations would be made]' \
'--merge[Enable fixing of migration conflicts.]' \
'--empty[Create an empty migration.]' \
$noinput_args \
$nul_args && ret=0
}
_managepy-migrate(){
_arguments -s : \
'--fake[Mark migrations as run without actually running them]' \
'--list[Show a list of all known migrations and which are applied]' \
$no_init_data_args \
$noinput_args \
$db_args \
$nul_args && ret=0
}
_managepy-runfcgi(){
local state
local fcgi_opts
fcgi_opts=(
'protocol[fcgi, scgi, ajp, ... (default fcgi)]:protocol:(fcgi scgi ajp)'
'host[hostname to listen on.]:'
'port[port to listen on.]:'
'socket[UNIX socket to listen on.]:file:_files'
'method[prefork or threaded (default prefork)]:method:(prefork threaded)'
'maxrequests[number of requests a child handles before it is killed and a new child is forked (0 = no limit).]:'
'maxspare[max number of spare processes / threads.]:'
'minspare[min number of spare processes / threads.]:'
'maxchildren[hard limit number of processes / threads.]:'
'daemonize[whether to detach from terminal.]:boolean:(False True)'
'pidfile[write the spawned process-id to this file.]:file:_files'
'workdir[change to this directory when daemonizing.]:directory:_files'
'outlog[write stdout to this file.]:file:_files'
'errlog[write stderr to this file.]:file:_files'
)
_arguments -s : \
$nul_args \
'*: :_values "FCGI Setting" $fcgi_opts' && ret=0
}
_managepy-runserver(){
_arguments -s : \
'--ipv6[Tells Django to use an IPv6 address.]' \
'--nothreading[Tells Django to NOT use threading.]' \
'--noreload[Tells Django to NOT use the auto-reloader.]' \
'--nostatic[Tells Django to NOT automatically serve static files at STATIC_URL.]' \
'--insecure[Allows serving static files even if DEBUG is False.]' \
$nul_args && ret=0
}
_managepy-shell(){
_arguments -s : \
'--plain[Tells Django to use plain Python, not IPython.]' \
'--no-startup[When using plain Python, ignore the PYTHONSTARTUP environment variable and ~/.pythonrc.py script.]' \
'--interface=-[Specify an interactive interpreter interface.]:INTERFACE:((ipython bpython))' \
$nul_args && ret=0
}
_managepy-sql(){
_arguments -s : \
$db_args \
$nul_args && ret=0
}
_managepy-sqlall(){
_arguments -s : \
$db_args \
$nul_args && ret=0
}
_managepy-sqlclear(){
_arguments -s : \
$db_args \
$nul_args && ret=0
}
_managepy-sqlcustom(){
_arguments -s : \
$db_args \
$nul_args && ret=0
}
_managepy-dropindexes(){
_arguments -s : \
$db_args \
$nul_args && ret=0
}
_managepy-sqlflush(){
_arguments -s : \
$db_args \
$nul_args && ret=0
}
_managepy-sqlindexes(){
_arguments -s : \
$db_args \
$nul_args && ret=0
}
_managepy-sqlinitialdata(){
_arguments -s : \
$nul_args && ret=0
}
_managepy-sqlsequencereset(){
_arguments -s : \
$db_args \
$nul_args && ret=0
}
_managepy-squashmigrations(){
_arguments -s : \
'--no-optimize[Do not try to optimize the squashed operations.]' \
$noinput_args \
$nul_args && ret=0
}
_managepy-startapp(){
_arguments -s : \
$start_args \
$nul_args && ret=0
}
_managepy-startproject(){
_arguments -s : \
$start_args \
$nul_args && ret=0
}
_managepy-syncdb() {
_arguments -s : \
$noinput_args \
$no_init_data_args \
$db_args \
$nul_args && ret=0
}
_managepy-test() {
_arguments -s : \
'--failfast[Tells Django to stop running the test suite after first failed test.]' \
'--testrunner=-[Tells Django to use specified test runner class instead of the one specified by the TEST_RUNNER setting.]' \
'--liveserver=-[Overrides the default address where the live server (used with LiveServerTestCase) is expected to run from. The default value is localhost:8081.]' \
'--top-level-directory=-[Top level of project for unittest discovery.]' \
'--pattern=-[The test matching pattern. Defaults to test*.py.]:' \
$noinput_args \
'*::appname:_applist' \
$nul_args && ret=0
}
_managepy-testserver() {
_arguments -s : \
'--addrport=-[port number or ipaddr:port to run the server on.]' \
'--ipv6[Tells Django to use an IPv6 address.]' \
$noinput_args \
'*::fixture:_files' \
$nul_args && ret=0
}
_managepy-validate() {
_arguments -s : \
$tag_args \
$nul_args && ret=0
}
_managepy-commands() {
local -a commands
commands=(
"changepassword:Change a user's password for django.contrib.auth."
'check:Checks the entire Django project for potential problems.'
'compilemessages:Compiles .po files to .mo files for use with builtin gettext support.'
'createcachetable:Creates the table needed to use the SQL cache backend.'
'createsuperuser:Used to create a superuser.'
'collectstatic:Collect static files in a single location.'
'dbshell:Runs the command-line client for the current DATABASE_ENGINE.'
"diffsettings:Displays differences between the current settings.py and Django's default settings."
'dumpdata:Output the contents of the database as a fixture of the given format.'
'flush:Executes ``sqlflush`` on the current database.'
'help:manage.py help.'
'inspectdb:Introspects the database tables in the given database and outputs a Django model module.'
'loaddata:Installs the named fixture(s) in the database.'
'makemessages:Runs over the entire source tree of the current directory and pulls out all strings marked for translation.'
'makemigrations:Creates new migration(s) for apps.'
'migrate:Updates database schema. Manages both apps with migrations and those without.'
'runfcgi:Run this project as a fastcgi (or some other protocol supported by flup) application,'
'runserver:Starts a lightweight Web server for development.'
'shell:Runs a Python interactive interpreter.'
'sql:Prints the CREATE TABLE SQL statements for the given app name(s).'
'sqlall:Prints the CREATE TABLE, custom SQL and CREATE INDEX SQL statements for the given model module name(s).'
'sqlclear:Prints the DROP TABLE SQL statements for the given app name(s).'
'sqlcustom:Prints the custom table modifying SQL statements for the given app name(s).'
'sqldropindexes:Prints the DROP INDEX SQL statements for the given model module name(s).'
'sqlflush:Returns a list of the SQL statements required to return all tables in the database to the state they were in just after they were installed.'
'sqlindexes:Prints the CREATE INDEX SQL statements for the given model module name(s).'
"sqlinitialdata:RENAMED: see 'sqlcustom'"
'sqlsequencereset:Prints the SQL statements for resetting sequences for the given app name(s).'
'squashmigrations:Squashes an existing set of migrations (from first until specified) into a single new one.'
"startapp:Creates a Django app directory structure for the given app name in this project's directory."
"startproject:Creates a Django project directory structure for the given project name in this current directory."
"syncdb:Create the database tables for all apps in INSTALLED_APPS whose tables haven't already been created."
'test:Runs the test suite for the specified applications, or the entire site if no apps are specified.'
'testserver:Runs a development server with data from the given fixture(s).'
'validate:Validates all installed models.'
)
_describe -t commands 'manage.py command' commands && ret=0
}
_applist() {
local line
local -a apps
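# The embedded python snippet prints the INSTALLED_APPS entries that belong to
# the current project (those prefixed with the project directory's name), with
# that prefix stripped, one app per line.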
_call_program help-command "python -c \"import os.path as op, re, django.conf, sys;\\
bn=op.basename(op.abspath(op.curdir));[sys\\
.stdout.write(str(re.sub(r'^%s\.(.*?)$' %
bn, r'\1', i)) + '\n') for i in django.conf.settings.\\
INSTALLED_APPS if re.match(r'^%s' % bn, i)]\"" \
| while read -A line; do apps=($line $apps); done
_values 'Application' $apps && ret=0
}
_managepy() {
local curcontext=$curcontext ret=1
if ((CURRENT == 2)); then
_managepy-commands
else
shift words
(( CURRENT -- ))
curcontext="${curcontext%:*:*}:managepy-$words[1]:"
_call_function ret _managepy-$words[1]
fi
}
compdef _managepy manage.py
compdef _managepy django
compdef _managepy django-admin
compdef _managepy django-admin.py
compdef _managepy django-manage
# Docker-compose plugin for oh my zsh
A copy of the completion script from the [docker-compose][1] git repo.
[1]: https://github.com/docker/compose/blob/master/contrib/completion/zsh/_docker-compose
#compdef docker-compose
# Description
# -----------
# zsh completion for docker-compose
# https://github.com/sdurrheimer/docker-compose-zsh-completion
# -------------------------------------------------------------------------
# Version
# -------
# 0.1.0
# -------------------------------------------------------------------------
# Authors
# -------
# * Steve Durrheimer <s.durrheimer@gmail.com>
# -------------------------------------------------------------------------
# Inspiration
# -----------
# * @albers docker-compose bash completion script
# * @felixr docker zsh completion script : https://github.com/felixr/docker-zsh-completion
# -------------------------------------------------------------------------
# For compatibility reasons, Compose and therefore its completion supports several
# stack composition files, as listed here in descending priority.
# Support for these filenames might be dropped in some future version.
__docker-compose_compose_file() {
local file
for file in docker-compose.y{,a}ml fig.y{,a}ml ; do
[ -e $file ] && {
echo $file
return
}
done
echo docker-compose.yml
}
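# In practice, the first existing file among docker-compose.yml, docker-compose.yaml,
# fig.yml and fig.yaml is used, falling back to docker-compose.yml when none exists.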
# Extracts all service names from docker-compose.yml.
___docker-compose_all_services_in_compose_file() {
local already_selected
local -a services
already_selected=$(echo ${words[@]} | tr " " "|")
awk -F: '/^[a-zA-Z0-9]/{print $1}' "${compose_file:-$(__docker-compose_compose_file)}" 2>/dev/null | grep -Ev "$already_selected"
}
# All services, even those without an existing container
__docker-compose_services_all() {
services=$(___docker-compose_all_services_in_compose_file)
_alternative "args:services:($services)"
}
# All services that have an entry with the given key in their docker-compose.yml section
___docker-compose_services_with_key() {
local already_selected
local -a buildable
already_selected=$(echo ${words[@]} | tr " " "|")
# flatten sections to one line, then filter lines containing the key and return section name.
awk '/^[a-zA-Z0-9]/{printf "\n"};{printf $0;next;}' "${compose_file:-$(__docker-compose_compose_file)}" 2>/dev/null | awk -F: -v key=": +$1:" '$0 ~ key {print $1}' 2>/dev/null | grep -Ev "$already_selected"
}
# All services that are defined by a Dockerfile reference
__docker-compose_services_from_build() {
buildable=$(___docker-compose_services_with_key build)
_alternative "args:buildable services:($buildable)"
}
# All services that are defined by an image
__docker-compose_services_from_image() {
pullable=$(___docker-compose_services_with_key image)
_alternative "args:pullable services:($pullable)"
}
__docker-compose_get_services() {
local kind expl
declare -a running stopped lines args services
docker_status=$(docker ps > /dev/null 2>&1)
if [ $? -ne 0 ]; then
_message "Error! Docker is not running."
return 1
fi
kind=$1
shift
[[ $kind = (stopped|all) ]] && args=($args -a)
lines=(${(f)"$(_call_program commands docker ps ${args})"})
services=(${(f)"$(_call_program commands docker-compose 2>/dev/null ${compose_file:+-f $compose_file} ${compose_project:+-p $compose_project} ps -q)"})
# Parse header line to find columns
local i=1 j=1 k header=${lines[1]}
declare -A begin end
while (( $j < ${#header} - 1 )) {
i=$(( $j + ${${header[$j,-1]}[(i)[^ ]]} - 1))
j=$(( $i + ${${header[$i,-1]}[(i) ]} - 1))
k=$(( $j + ${${header[$j,-1]}[(i)[^ ]]} - 2))
begin[${header[$i,$(($j-1))]}]=$i
end[${header[$i,$(($j-1))]}]=$k
}
lines=(${lines[2,-1]})
# Container ID
local line s name
local -a names
for line in $lines; do
if [[ $services == *"${line[${begin[CONTAINER ID]},${end[CONTAINER ID]}]%% ##}"* ]]; then
names=(${(ps:,:)${${line[${begin[NAMES]},-1]}%% *}})
for name in $names; do
s="${${name%_*}#*_}:${(l:15:: :::)${${line[${begin[CREATED]},${end[CREATED]}]/ ago/}%% ##}}"
s="$s, ${line[${begin[CONTAINER ID]},${end[CONTAINER ID]}]%% ##}"
s="$s, ${${${line[$begin[IMAGE],$end[IMAGE]]}/:/\\:}%% ##}"
if [[ ${line[${begin[STATUS]},${end[STATUS]}]} = Exit* ]]; then
stopped=($stopped $s)
else
running=($running $s)
fi
done
fi
done
[[ $kind = (running|all) ]] && _describe -t services-running "running services" running
[[ $kind = (stopped|all) ]] && _describe -t services-stopped "stopped services" stopped
}
__docker-compose_stoppedservices() {
__docker-compose_get_services stopped "$@"
}
__docker-compose_runningservices() {
__docker-compose_get_services running "$@"
}
__docker-compose_services () {
__docker-compose_get_services all "$@"
}
__docker-compose_caching_policy() {
oldp=( "$1"(Nmh+1) ) # 1 hour
(( $#oldp ))
}
__docker-compose_commands () {
local cache_policy
zstyle -s ":completion:${curcontext}:" cache-policy cache_policy
if [[ -z "$cache_policy" ]]; then
zstyle ":completion:${curcontext}:" cache-policy __docker-compose_caching_policy
fi
if ( [[ ${+_docker_compose_subcommands} -eq 0 ]] || _cache_invalid docker_compose_subcommands) \
&& ! _retrieve_cache docker_compose_subcommands;
then
local -a lines
lines=(${(f)"$(_call_program commands docker-compose 2>&1)"})
_docker_compose_subcommands=(${${${lines[$((${lines[(i)Commands:]} + 1)),${lines[(I) *]}]}## #}/ ##/:})
_store_cache docker_compose_subcommands _docker_compose_subcommands
fi
_describe -t docker-compose-commands "docker-compose command" _docker_compose_subcommands
}
__docker-compose_subcommand () {
local -a _command_args
integer ret=1
case "$words[1]" in
(build)
_arguments \
'--no-cache[Do not use cache when building the image]' \
'*:services:__docker-compose_services_from_build' && ret=0
;;
(help)
_arguments ':subcommand:__docker-compose_commands' && ret=0
;;
(kill)
_arguments \
'-s[SIGNAL to send to the container. Default signal is SIGKILL.]:signal:_signals' \
'*:running services:__docker-compose_runningservices' && ret=0
;;
(logs)
_arguments \
'--no-color[Produce monochrome output.]' \
'*:services:__docker-compose_services_all' && ret=0
;;
(migrate-to-labels)
_arguments \
'(-):Recreate containers to add labels' && ret=0
;;
(port)
_arguments \
'--protocol=-[tcp or udp (defaults to tcp)]:protocol:(tcp udp)' \
'--index=-[index of the container if there are multiple instances of a service (defaults to 1)]:index: ' \
'1:running services:__docker-compose_runningservices' \
'2:port:_ports' && ret=0
;;
(ps)
_arguments \
'-q[Only display IDs]' \
'*:services:__docker-compose_services_all' && ret=0
;;
(pull)
_arguments \
'--allow-insecure-ssl[Allow insecure connections to the docker registry]' \
'*:services:__docker-compose_services_from_image' && ret=0
;;
(rm)
_arguments \
'(-f --force)'{-f,--force}"[Don't ask to confirm removal]" \
'-v[Remove volumes associated with containers]' \
'*:stopped services:__docker-compose_stoppedservices' && ret=0
;;
(run)
_arguments \
'--allow-insecure-ssl[Allow insecure connections to the docker registry]' \
'-d[Detached mode: Run container in the background, print new container name.]' \
'--entrypoint[Overwrite the entrypoint of the image.]:entry point: ' \
'*-e[KEY=VAL Set an environment variable (can be used multiple times)]:environment variable KEY=VAL: ' \
'(-u --user)'{-u,--user=-}'[Run as specified username or uid]:username or uid:_users' \
"--no-deps[Don't start linked services.]" \
'--rm[Remove container after run. Ignored in detached mode.]' \
"--service-ports[Run command with the service's ports enabled and mapped to the host.]" \
'-T[Disable pseudo-tty allocation. By default `docker-compose run` allocates a TTY.]' \
'(-):services:__docker-compose_services' \
'(-):command: _command_names -e' \
'*::arguments: _normal' && ret=0
;;
(scale)
_arguments '*:running services:__docker-compose_runningservices' && ret=0
;;
(start)
_arguments '*:stopped services:__docker-compose_stoppedservices' && ret=0
;;
(stop|restart)
_arguments \
'(-t --timeout)'{-t,--timeout}"[Specify a shutdown timeout in seconds. (default: 10)]:seconds: " \
'*:running services:__docker-compose_runningservices' && ret=0
;;
(up)
_arguments \
'--allow-insecure-ssl[Allow insecure connections to the docker registry]' \
'-d[Detached mode: Run containers in the background, print new container names.]' \
'--no-color[Produce monochrome output.]' \
"--no-deps[Don't start linked services.]" \
"--no-recreate[If containers already exist, don't recreate them.]" \
"--no-build[Don't build an image, even if it's missing]" \
'(-t --timeout)'{-t,--timeout}"[Specify a shutdown timeout in seconds. (default: 10)]:seconds: " \
"--x-smart-recreate[Only recreate containers whose configuration or image needs to be updated. (EXPERIMENTAL)]" \
'*:services:__docker-compose_services_all' && ret=0
;;
(version)
_arguments \
"--short[Shows only Compose's version number.]" && ret=0
;;
(*)
_message 'Unknown sub command'
esac
return ret
}
_docker-compose () {
# Support for subservices, which allows for `compdef _docker docker-shell=_docker_containers`.
# Based on /usr/share/zsh/functions/Completion/Unix/_git without support for `ret`.
if [[ $service != docker-compose ]]; then
_call_function - _$service
return
fi
local curcontext="$curcontext" state line ret=1
typeset -A opt_args
_arguments -C \
'(- :)'{-h,--help}'[Get help]' \
'--verbose[Show more output]' \
'(- :)'{-v,--version}'[Print version and exit]' \
'(-f --file)'{-f,--file}'[Specify an alternate docker-compose file (default: docker-compose.yml)]:file:_files -g "*.yml"' \
'(-p --project-name)'{-p,--project-name}'[Specify an alternate project name (default: directory name)]:project name:' \
'(-): :->command' \
'(-)*:: :->option-or-argument' && ret=0
local counter=1
#local compose_file compose_project
while [ $counter -lt ${#words[@]} ]; do
case "${words[$counter]}" in
-f|--file)
(( counter++ ))
compose_file="${words[$counter]}"
;;
-p|--project-name)
(( counter++ ))
compose_project="${words[$counter]}"
;;
*)
;;
esac
(( counter++ ))
done
case $state in
(command)
__docker-compose_commands && ret=0
;;
(option-or-argument)
curcontext=${curcontext%:*:*}:docker-compose-$words[1]:
__docker-compose_subcommand && ret=0
;;
esac
return ret
}
_docker-compose "$@"
## Docker autocomplete plugin
- Adds autocomplete options for all docker commands.
- Also shows container IDs and image names where applicable.
#### Shows help for all commands
![General Help](http://i.imgur.com/tUBO9jh.png "Help for all commands")
#### Shows your downloaded images where applicable
![Images](http://i.imgur.com/R8ZsWO1.png "Images")
#### Shows your running containers where applicable
![Containers](http://i.imgur.com/WQtbheg.png "Containers")
Maintainer : Ahmed Azaan ([@aeonazaan](https://twitter.com/aeonazaan))
#compdef docker
# Docker autocompletion for oh-my-zsh
# Requires: Docker installed
# Author: Azaan (@aeonazaan)
# Updates: Bob Maerten (@bobmaerten) for Docker v0.9+
# Paul van den Berg (@bergvandenp) for Docker v1.3+
# ----- Helper functions
# Output a selectable list of all running docker containers
__docker_containers() {
declare -a cont_cmd
cont_cmd=($(docker ps | awk 'NR>1{print $NF":[CON("$1")"$2"("$3")]"}'))
if [[ -n "$cont_cmd" ]]; then
_describe 'containers' cont_cmd
fi
}
# Output a selectable list of all containers, even not running
__docker_all_containers() {
declare -a cont_cmd
cont_cmd=($(docker ps -a | awk 'NR>1{print $NF":[CON("$1")"$2"("$3")]"}'))
if [[ -n "$cont_cmd" ]]; then
_describe 'containers' cont_cmd
fi
}
# output a selectable list of all docker images
__docker_images() {
declare -a img_cmd
img_cmd=($(docker images | awk 'NR>1{print $1}'))
_describe 'images' img_cmd
}
# ----- Commands
# Separate function for each command, makes extension easier later
# ---------------------------
__attach() {
_arguments \
'--no-stdin[Do not attach STDIN]' \
'--sig-proxy[Proxify all received signal to the process (even in non-tty mode)]'
__docker_containers
}
__build() {
_arguments \
'--no-cache[Do not use cache when building the image]' \
'(-q,--quiet)'{-q,--quiet}'[Suppress the verbose output generated by the containers]' \
'--rm[Remove intermediate containers after a successful build]' \
'(-t,--tag=)'{-t,--tag=}'[Repository name (and optionally a tag) to be applied to the resulting image in case of success]' \
'*:files:_files'
}
__commit() {
_arguments \
'(-a,--author=)'{-a,--author=}'[Author (e.g. "John Hannibal Smith <hannibal@a-team.com>")]' \
'(-c,--change=)'{-c,--change=}'[Apply Dockerfile instruction to the created image]' \
'(-m,--message=)'{-m,--message=}'[Commit message]' \
'(-p,--pause=)'{-p,--pause=}'[Pause container during commit]' \
}
__cp() {
__docker_containers
}
__create() {
_arguments \
'(-P,--publish-all=)'{-P,--publish-all=}'[Publish all exposed ports to the host interfaces]' \
'(-a,--attach=)'{-a,--attach=}'[Attach to STDIN, STDOUT or STDERR]' \
'--add-host=[Add a custom host-to-IP mapping]' \
'--cap-add=[Add Linux capabilities]' \
'--cap-drop=[Drop Linux capabilities]' \
'--cpuset-cpus=[CPUs in which to allow execution (0-3, 0,1)]' \
'(-c,--cpu-shares=)'{-c,--cpu-shares=}'[CPU shares (relative weight)]' \
'--cidfile=[Write the container ID to the file]' \
'--device=[Add a host device to the container]' \
'--dns=[Set custom dns servers]' \
'--dns-search=[Set custom DNS search domains]' \
'(-e,--env=)'{-e,--env=}'[Set environment variables]' \
'--env-file=[Read in a file of environment variables]' \
'--entrypoint=[Overwrite the default entrypoint of the image]' \
'--expose=[Expose a port from the container without publishing it to your host]' \
'(-h,--hostname=)'{-h,--hostname=}'[Container host name]' \
'(-i,--interactive=)'{-i,--interactive=}'[Keep STDIN open even if not attached]' \
'--ipc=[IPC namespace to use]' \
'(-l,--label=)'{-l,--label=}'[Set meta data on a container]' \
'--link=[Add link to another container (name:alias)]' \
'--log-driver=[Logging driver for the container]' \
'--lxc-conf=[Add custom LXC options]' \
'--mac-address=[Container MAC address (e.g. 92:d0:c6:0a:29:33)]' \
'(-m,--memory=)'{-m,--memory=}'[Memory limit (format: <number><optional unit>, where unit = b, k, m or g)]' \
'--net=[Set the Network mode for the container]' \
'--name=[Assign a name to the container]' \
'--pid=[PID namespace to use]' \
'(-p,--publish=)'{-p,--publish=}'[Publish a container''s port to the host (format: ip:hostPort:containerPort/protocol)]' \
'--privileged=[Give extended privileges to this container]' \
'--restart=[Restart policy to apply when a container exits]' \
'--security-opt=[Security Options]' \
'--sig-proxy=[Proxify all received signal to the process (even in non-tty mode)]' \
'(-t,--tty=)'{-t,--tty=}'[Allocate a pseudo-tty]' \
'(-u,--user=)'{-u,--user=}'[Username or UID]' \
'--ulimit=[Ulimit options]' \
'(-v,--volume=)'{-v,--volume=}'[Bind mount a volume (e.g. -v /host:/container or -v /container)]' \
'--volumes-from=[Mount volumes from the specified container(s)]' \
'(-w,--workdir=)'{-w,--workdir=}'[Working directory inside the container]'
__docker_images
}
__diff() {
__docker_containers
}
__events() {
_arguments \
'--since=[Show previously created events and then stream.]'
}
__export() {
__docker_containers
}
__history() {
_arguments \
'--no-trunc=[Don''t truncate output]' \
'(-q,--quiet)'{-q,--quiet}'[Only show numeric IDs]'
__docker_images
}
__images() {
_arguments \
'(-a,--all)'{-a,--all}'[Show all images (by default filter out the intermediate images used to build)]' \
'--no-trunc[Don''t truncate output]' \
'(-q,--quiet=)'{-q,--quiet=}'[Only show numeric IDs]' \
'(-t,--tree=)'{-t,--tree=}'[Output graph in tree format]' \
'(-v,--viz=)'{-v,--viz=}'[Output graph in graphviz format]'
__docker_images
}
__import() {
_arguments '*:files:_files'
}
__info() {
# no arguments
}
__inspect() {
__docker_images
__docker_all_containers
}
__kill() {
_arguments \
'(-s,--signal=)'{-s,--signal=}'[KILL Signal]'
__docker_containers
}
__load() {
_arguments '*:files:_files'
}
__login() {
_arguments \
'(-e,--email=)'{-e,--email=}'[Email]' \
'(-p,--password=)'{-p,--password=}'[Password]' \
'(-u,--username=)'{-u,--username=}'[Username]'
}
__logs() {
_arguments \
'(-f,--follow)'{-f,--follow}'[Follow log output]'
__docker_containers
}
__port() {
__docker_containers
}
__top() {
__docker_containers
}
__ps() {
_arguments \
'(-a,--all)'{-a,--all}'[Show all containers. Only running containers are shown by default.]' \
'--before-id=[Show only container created before Id, include non-running ones.]' \
'(-l,--latest)'{-l,--latest}'[Show only the latest created container, include non-running ones.]' \
'-n=[Show n last created containers, include non-running ones. default=-1.]' \
'--no-trunc[Don''t truncate output]' \
'(-q,--quiet)'{-q,--quiet}'[Only display numeric IDs]' \
'(-s,--size)'{-s,--size}'[Display sizes]' \
'--since-id=[Show only containers created since Id, include non-running ones.]'
}
__pull() {
_arguments \
'(-t,--tag=)'{-t,--tag=}'[Download tagged image in repository]'
}
__push() {
# no arguments
}
__restart() {
_arguments \
'(-t,--time=)'{-t,--time=}'[Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default=10]'
__docker_containers
}
__rm() {
_arguments \
'(-f,--force=)'{-f,--force=}'[Force removal of running container]' \
'(-l,--link=)'{-l,--link=}'[Remove the specified link and not the underlying container]' \
'(-v,--volumes=)'{-v,--volumes=}'[Remove the volumes associated to the container]'
__docker_all_containers
}
__rmi() {
_arguments \
'(-f,--force=)'{-f,--force=}'[Force]'
__docker_images
}
__run() {
_arguments \
'(-P,--publish-all=)'{-P,--publish-all=}'[Publish all exposed ports to the host interfaces]' \
'(-a,--attach=)'{-a,--attach=}'[Attach to STDIN, STDOUT or STDERR]' \
'--add-host=[Add a custom host-to-IP mapping]' \
'--cap-add=[Add Linux capabilities]' \
'--cap-drop=[Drop Linux capabilities]' \
'--cpuset-cpus=[CPUs in which to allow execution (0-3, 0,1)]' \
'(-c,--cpu-shares=)'{-c,--cpu-shares=}'[CPU shares (relative weight)]' \
'--cidfile=[Write the container ID to the file]' \
'(-d,--detach=)'{-d,--detach=}'[Run container in the background, print new container id]' \
'--device=[Add a host device to the container]' \
'--dns=[Set custom dns servers]' \
'--dns-search=[Set custom DNS search domains]' \
'(-e,--env=)'{-e,--env=}'[Set environment variables]' \
'--env-file=[Read in a file of environment variables]' \
'--entrypoint=[Overwrite the default entrypoint of the image]' \
'--expose=[Expose a port from the container without publishing it to your host]' \
'(-h,--hostname=)'{-h,--hostname=}'[Container host name]' \
'(-i,--interactive=)'{-i,--interactive=}'[Keep STDIN open even if not attached]' \
'--ipc=[IPC namespace to use]' \
'(-l,--label=)'{-l,--label=}'[Set meta data on a container]' \
'--link=[Add link to another container (name:alias)]' \
'--log-driver=[Logging driver for the container]' \
'--lxc-conf=[Add custom LXC options]' \
'--mac-address=[Container MAC address (e.g. 92:d0:c6:0a:29:33)]' \
'(-m,--memory=)'{-m,--memory=}'[Memory limit (format: <number><optional unit>, where unit = b, k, m or g)]' \
'--net=[Set the Network mode for the container]' \
'--name=[Assign a name to the container]' \
'--pid=[PID namespace to use]' \
'(-p,--publish=)'{-p,--publish=}'[Publish a container''s port to the host (format: ip:hostPort:containerPort/protocol)]' \
'--privileged=[Give extended privileges to this container]' \
'--restart=[Restart policy to apply when a container exits]' \
'--rm=[Automatically remove the container when it exits (incompatible with -d)]' \
'--security-opt=[Security Options]' \
'--sig-proxy=[Proxify all received signal to the process (even in non-tty mode)]' \
'(-t,--tty=)'{-t,--tty=}'[Allocate a pseudo-tty]' \
'(-u,--user=)'{-u,--user=}'[Username or UID]' \
'--ulimit=[Ulimit options]' \
'(-v,--volume=)'{-v,--volume=}'[Bind mount a volume (e.g. -v /host:/container or -v /container)]' \
'--volumes-from=[Mount volumes from the specified container(s)]' \
'(-w,--workdir=)'{-w,--workdir=}'[Working directory inside the container]'
__docker_images
}
__search() {
_arguments \
'--no-trunc=[Don''t truncate output]' \
'(-s,--stars=)'{-s,--stars=}'[Only display images with at least N stars]' \
'(-t,--trusted=)'{-t,--trusted=}'[Only show trusted builds]'
}
__save() {
__docker_images
}
__start() {
_arguments \
'(-a,--attach=)'{-a,--attach=}'[Attach container''s STDOUT/STDERR and forward all signals to the process]' \
'(-i,--interactive=)'{-i,--interactive=}'[Attach container''s STDIN]'
__docker_all_containers
}
__stats() {
__docker_containers
}
__stop() {
_arguments \
'(-t,--time=)'{-t,--time=}'[Number of seconds to wait for the container to stop before killing it.]'
__docker_containers
}
__tag() {
_arguments \
'(-f,--force=)'{-f,--force=}'[Force]'
__docker_images
}
__version() {
# no arguments
}
__wait() {
__docker_containers
}
__exec() {
_arguments \
'(-d,--detach=)'{-d,--detach=}'[Detached mode: run command in the background]' \
'(-i,--interactive=)'{-i,--interactive=}'[Keep STDIN open even if not attached]' \
'(-t,--tty=)'{-t,--tty=}'[Allocate a pseudo-TTY]'
__docker_containers
}
# end commands ---------
# ----------------------
local -a _1st_arguments
_1st_arguments=(
"attach":"Attach to a running container"
"build":"Build a container from a Dockerfile"
"commit":"Create a new image from a container's changes"
"cp":"Copy files/folders from the containers filesystem to the host path"
"create":"Create new container without running it"
"diff":"Inspect changes on a container's filesystem"
"events":"Get real time events from the server"
"export":"Stream the contents of a container as a tar archive"
"history":"Show the history of an image"
"images":"List images"
"import":"Create a new filesystem image from the contents of a tarball"
"info":"Display system-wide information"
"inspect":"Return low-level information on a container"
"kill":"Kill a running container"
"load":"Load an image from a tar archive"
"login":"Register or Login to the docker registry server"
"logs":"Fetch the logs of a container"
"port":"Lookup the public-facing port which is NAT-ed to PRIVATE_PORT"
"ps":"List containers"
"pull":"Pull an image or a repository from the docker registry server"
"push":"Push an image or a repository to the docker registry server"
"restart":"Restart a running container"
"rm":"Remove one or more containers"
"rmi":"Remove one or more images"
"run":"Run a command in a new container"
"save":"Save an image to a tar archive"
"search":"Search for an image in the docker index"
"start":"Start a stopped container"
"stats":"Display a live stream of one or more containers' resource usage statistics"
"stop":"Stop a running container"
"tag":"Tag an image into a repository"
"top":"Lookup the running processes of a container"
"version":"Show the docker version information"
"wait":"Block until a container stops, then print its exit code"
"exec":"Run a task inside a running container"
)
_arguments '*:: :->command'
if (( CURRENT == 1 )); then
_describe -t commands "docker command" _1st_arguments
return
fi
local -a _command_args
case "$words[1]" in
attach)
__attach ;;
build)
__build ;;
commit)
__commit ;;
cp)
__cp ;;
create)
__create ;;
diff)
__diff ;;
events)
__events ;;
export)
__export ;;
history)
__history ;;
images)
__images ;;
import)
__import ;;
info)
__info ;;
inspect)
__inspect ;;
kill)
__kill ;;
load)
__load ;;
login)
__login ;;
logs)
__logs ;;
port)
__port ;;
ps)
__ps ;;
pull)
__pull ;;
push)
__push ;;
restart)
__restart ;;
rm)
__rm ;;
rmi)
__rmi ;;
run)
__run ;;
save)
__save ;;
search)
__search ;;
stats)
__stats ;;
start)
__start ;;
stop)
__stop ;;
tag)
__tag ;;
top)
__top ;;
version)
__version ;;
wait)
__wait ;;
exec)
__exec ;;
esac
# The Emacs 23 daemon capability is a killer feature:
# one emacs process handles all your frames, whether
# you use a frame opened in a terminal via an ssh connection or X frames
# opened on the same host.
# The benefits are multiple:
# - You no longer pay the cost of starting Emacs every time.
# - Opening a file is fast, since Emacs has nothing else to do.
# - You can share open buffers across all frames.
# - Configuration changes made at runtime are applied to all frames.
if "$ZSH/tools/require_tool.sh" emacs 23 2>/dev/null ; then
export EMACS_PLUGIN_LAUNCHER="$ZSH/plugins/emacs/emacsclient.sh"
# set EDITOR if not already defined.
export EDITOR="${EDITOR:-${EMACS_PLUGIN_LAUNCHER}}"
alias emacs="$EMACS_PLUGIN_LAUNCHER --no-wait"
alias e=emacs
# same as M-x eval, but from outside Emacs.
alias eeval="$EMACS_PLUGIN_LAUNCHER --eval"
# create a new X frame
alias eframe='emacsclient --alternate-editor "" --create-frame'
# to code all night long
alias emasc=emacs
alias emcas=emacs
# Write to standard output the path to the file
# opened in the current buffer.
function efile {
local cmd="(buffer-file-name (window-buffer))"
"$EMACS_PLUGIN_LAUNCHER" --eval "$cmd" | tr -d \"
}
# Write to standard output the directory of the file
# opened in the current buffer
function ecd {
local cmd="(let ((buf-name (buffer-file-name (window-buffer))))
(if buf-name (file-name-directory buf-name)))"
local dir="$($EMACS_PLUGIN_LAUNCHER --eval $cmd | tr -d \")"
if [ -n "$dir" ] ;then
echo "$dir"
else
echo "can not deduce current buffer filename." >/dev/stderr
return 1
fi
}
fi
## Local Variables:
## mode: sh
## End:
#!/bin/sh
# get list of available X windows.
x=`emacsclient --alternate-editor '' --eval '(x-display-list)' 2>/dev/null`
if [ -z "$x" ] || [ "$x" = "nil" ] ;then
# Create one if there is no X window yet.
emacsclient --alternate-editor "" --create-frame "$@"
else
# prevent creating another X frame if there is at least one present.
emacsclient --alternate-editor "" "$@"
fi
# Ember-cli
**Maintainer:** [BilalBudhani](http://www.github.com/BilalBudhani)
Ember-cli (http://www.ember-cli.com/)
### List of Aliases
alias es='ember serve'
alias ea='ember addon'
alias eb='ember build'
alias ed='ember destroy'
alias eg='ember generate'
alias eh='ember help'
alias ein='ember init'
alias eia='ember install:addon'
alias eib='ember install:bower'
alias ein='ember install:npm'
alias ei='ember install'
alias et='ember test'
alias eu='ember update'
alias ev='ember version'