Category Archives: Commands


iptables processing steps (original image link)

Redirect eth0:3240 to
sudo sysctl -w net.ipv4.ip_forward=1
sudo sysctl -a | grep 'net.ipv4.ip_forward'
sysctl net.ipv4.ip_forward -> this reads the value
sudo sysctl -w net.ipv4.conf.eth0.route_localnet=1
sudo sysctl -a | grep 'net.ipv4.conf.eth0.route_localnet'
# you'll need the rule below when using ufw
sudo ufw allow to port 32400

Suppose we have a server with an eth0 with the ip

Set this iptables rule on the server:
sudo iptables -t nat -I PREROUTING -p tcp -i eth0 --dport 3240 -j DNAT --to-destination
or using the ip for eth0:
sudo iptables -t nat -I PREROUTING -p tcp -d --dport 3240 -j DNAT --to-destination
in order for this command to work on a client computer (but not on the server):
curl -kLD

Set only this iptables rule on the server:
sudo iptables -t nat -I OUTPUT -p tcp -o lo --dport 3240 -j REDIRECT --to-ports 32400
in order for these curl commands to work on the server:
curl -kLD - 
curl -kLD -

View and delete rules
sudo iptables -t nat --line-number -L -v
sudo iptables -t nat -D PREROUTING 1 -> deletes rule 1 from PREROUTING
sudo iptables -t nat -D OUTPUT 1 -> deletes rule 1 from OUTPUT

Linux media conversion

sudo apt install libav-tools
webm to mp4
ffmpeg -i "Jurjak - Bucuresti.webm" -qscale 0 "Jurjak - Bucuresti.mp4"
ffmpeg -fflags +genpts -i "Jurjak - Bucuresti.webm" -r 24 "Jurjak - Bucuresti1.mp4" -> change to 24 FPS
ffmpeg -i "Jurjak - Bucuresti.webm" -vf scale=-1:720 "Jurjak - Bucuresti1.mp4" -> change to 720p
mp4 to mp3
ffmpeg -i "song-name.mp4" -b:a 192K -vn "song-name.mp3"
mkv to mp3
ffmpeg -i "song-name.mkv" -b:a 192K -vn "song-name.mp3"
webm to mp3
ffmpeg -i "song-name.webm" -b:a 192K -vn "song-name.mp3"

Linux hardware information

# sources:

sudo lshw -short -C memory
sudo lshw -C memory
sudo dmidecode -t memory

VGA memory
sudo dmesg | grep Reserving
[    0.000000] Reserving Intel graphics stolen memory at 0x5ef00000-0x7eefffff
now compute, using a hexadecimal calculator, 7eefffff - 5ef00000 + 1
the result is 512 * 1024 * 1024 which means 512 MB of memory
other related commands:
sudo dmesg | grep memory
lspci -v | grep -A10 VGA
grep -i mem /var/log/Xorg.0.log
sudo dmesg | grep drm
sudo dmesg | grep "Memory usable by graphics device"

dmesg | grep -i BIOS


What files are open?                       lsof
What process has a particular file open?   lsof /path/to/the/file
What files in some directory are open?     lsof +D /path/to/the/dir
What files does some user have open?       lsof -u username
What files do a group of users have open?  lsof -u user1,user2
What files are open by process name?       lsof -c procname
What files are open by PID?                lsof -p 123
What files are open by other PIDs?         lsof -p ^123
Show network activity                      lsof -i
What files are open by port?               lsof -i :25
                                           lsof -i :smtp
List PIDs                                  lsof -t
Show network activity for a user           lsof -a -u username -i
Show socket use                            lsof -U
Show NFS activity                          lsof -N

Ufw (uncomplicated firewall)


important files

Uncomplicated Firewall
sudo ufw show added
sudo ufw status verbose
sudo ufw show listening
sudo ufw limit ssh
sudo ufw allow 80
sudo ufw allow 443
sudo ufw allow 32400
sudo ufw allow in from
sudo ufw allow in on eth1 to port 3389 proto tcp comment 'allow RDP access from LAN'
sudo ufw allow from to any proto gre comment 'allow VPN with MarchenGarten'
sudo ufw allow from to any port 3389 proto tcp
sudo ufw allow in on enp1s0 to any port 8083
# sudo ufw delete limit 1443
# sudo ufw delete 11 -> removes rule with order number 11
tailf /var/log/kern.log | grep "\[UFW BLOCK\]"
tailf /var/log/syslog | grep "\[UFW BLOCK\]"

transmission firewall with peer-port-random-on-start = false
grep port /********/.config/transmission-daemon/settings.json
sed -i s/"peer-port-random-on-start\": true"/"peer-port-random-on-start\": false"/ /********/.config/transmission-daemon/settings.json
peerport="`grep peer-port\\" /********/.config/transmission-daemon/settings.json | awk '{sub(/,/,\"\",$2); print $2;}'`"
sudo ufw allow $peerport

transmission firewall with peer-port-random-on-start = true
sed -i s/"peer-port-random-on-start\": false"/"peer-port-random-on-start\": true"/ /********/.config/transmission-daemon/settings.json
grep peer-port-random-low /********/.config/transmission-daemon/settings.json
grep peer-port-random-high /********/.config/transmission-daemon/settings.json
# sudo ufw allow proto udp to any port 49152:65535
# sudo ufw allow proto tcp to any port 49152:65535
sudo ufw allow 49152:65535/tcp
sudo ufw allow 49152:65535/udp

show ufw logs
tailf /var/log/kern.log | grep "\[UFW BLOCK\]"
tailf /var/log/syslog | grep "\[UFW BLOCK\]"

enable at startup
# in theory this alone should be enough, but in practice it isn't:
sudo sed -i s/"ENABLED=no"/"ENABLED=yes"/ /etc/ufw/ufw.conf
# you should add this too to /etc/rc.local before "exit 0" line:
if ! ufw enable; then
	echo "Can't start ufw!"
else
	echo "UFW started!"
fi

# Set to yes to apply rules to support IPv6 (no means only IPv6 on loopback
# accepted). You will need to 'disable' and then 'enable' the firewall for
# the changes to take effect.
sudo sed -i s/"IPV6=yes"/"IPV6=no"/ /etc/default/ufw

Configuring port forwarding (add rules to /etc/ufw/before.rules)
# see also
sudo sed -i s/"#net\/ipv4\/ip_forward"/"net\/ipv4\/ip_forward"/ /etc/ufw/sysctl.conf

turn off ipv6 autoconfiguration
sudo sed -i s/"#net\/ipv6\/conf\/default\/autoconf=0"/"net\/ipv6\/conf\/default\/autoconf=0"/ /etc/ufw/sysctl.conf
sudo sed -i s/"#net\/ipv6\/conf\/all\/autoconf=0"/"net\/ipv6\/conf\/all\/autoconf=0"/ /etc/ufw/sysctl.conf

configuration status
grep -nr 'ENABLED' /etc/ufw/ufw.conf
grep -nr -P "DEFAULT_FORWARD_POLICY|IPV6=" /etc/default/ufw
grep -nr -P "net\/ipv4\/ip_forward|net\/ipv6\/conf\/default\/autoconf|net\/ipv6\/conf\/all\/autoconf" /etc/ufw/sysctl.conf

deny access to an ip
sudo ufw deny from

limit access to an ip
sudo ufw insert 1 limit from comment 'uri abuser limited to anywhere'
sudo ufw insert 1 limit in proto tcp from to port 80,443,49152:65535 comment 'tcp abuser limited to on 80,443,49152:65535'
sudo ufw insert 1 limit in proto udp from to port 80,443,49152:65535 comment 'udp abuser limited to on 80,443,49152:65535'

Redirect from to ( is on eth0 interface)

# Locally generated packets do not pass via the PREROUTING chain!
sudo sysctl -w net.ipv4.ip_forward=1
sudo sysctl -a | grep 'net.ipv4.ip_forward'
sudo sysctl -w net.ipv4.conf.eth0.route_localnet=1
sudo sysctl -a | grep 'net.ipv4.conf.eth0.route_localnet'

# It seems that you could configure the above in /etc/ufw/sysctl.conf too though I haven't tested it.
/etc/default/ufw should have DEFAULT_FORWARD_POLICY="ACCEPT"

# in /etc/ufw/before.rules before filter section:
# -A = append last
# -I = insert first
# sudo iptables -t nat -I PREROUTING -p tcp --dport 80 -j REDIRECT --to-port 3000
# -I PREROUTING -p tcp --dport 80 -j REDIRECT --to-port 3000
# sudo iptables -t nat -I PREROUTING -i eth0 -p tcp --dport 80 -j DNAT --to-destination
-I PREROUTING -i eth0 -p tcp --dport 80 -j DNAT --to-destination

# you'll also need the rule below
sudo ufw allow to port 3000
# otherwise external users won't be allowed on port 80 and you'll see logs like this:
[UFW BLOCK] IN=eth0 OUT= MAC=3c:11:11:f0:21:11:00:11:0f:09:00:04:08:00 SRC= DST= LEN=60 TOS=0x00 PREC=0x00 TTL=63 ID=54696 DF PROTO=TCP SPT=9194 DPT=3000 WINDOW=26280 RES=0x00 SYN URGP=0

sudo ufw disable && sudo ufw enable

APT (Advanced Package Tool)

search packages by name using REGEX
apt-cache search libapr
apt-cache search 'php.*sql'
apt-cache search apache.\*perl
apt-cache search elvis\|vim

list the contents of a (not-installed) package
apt-file list mysql-client-5.1

showing package information
apt-cache showpkg libconfig-dev

check a package status
dpkg --get-selections | grep apache2

find which package contains a file
use also
apt-file update
apt-file -i search --regex /knemo-modem-transmit-receive.svg$ -> doesn't work for this specific file
apt-file -i search knemo-modem-transmit-receive.svg -> doesn't work for this specific file
apt-file search fusil/fusil-ogg123 -> this works so for the above fails maybe the repository from where the files were installed now is missing
dpkg --search knemo-modem-transmit-receive.svg -> but this one works (with owning package installed)
dpkg -S 'doc/*sql'

show package summary & contents
dpkg -l mongodb-compass
dpkg -L kibana
dpkg-query -L kibana

install deb file with automatic dependency resolution
sudo apt-get install ./Downloads/skypeforlinux-64.deb

install all packages you need to compile $PACKAGENAME
apt-get build-dep $PACKAGENAME

list repositories
grep -rh ^deb /etc/apt/sources.list /etc/apt/sources.list.d/

remove repository
sudo add-apt-repository --remove ppa:whatever/ppa

Linux various commands

# show only first line found:
grep "search this" nohup.out | sed -n '1p'
# show only last line found:
grep "search this" nohup.out | sed -n '$p'

# copy to current path the file /home/gigi/systemctl-services.txt 
# from (remote ssh server) using custom ssh port 27
scp -P 27 .


# pull and run an image
docker run hello-world
docker run -itP centos cat /etc/redhat-release
# lists all the images on your local system
docker images --help
docker images
# show all containers on the system
docker ps --help
docker ps -a
docker ps --no-trunc -a
# log into the Docker Hub
docker login --username=yourhubusername
# removing containers
docker ps -a
docker rm --help
docker rm 6075298d5896
# modify an image
docker run -itP -v "$HOME/KIT":/adrhc/KIT -v /home/adrk/certs/:/adrhc/certs centos /bin/bash
cd root
yum -y update
yum install -y wget
yum localinstall -y epel-release-latest-7.noarch.rpm
yum -y update
yum install -y nano mlocate zip unzip iftop htop net-tools openssh-clients openssh-server which sysvinit-tools psmisc less man-db openssl davfs2 fuse
# configure sshd
sed -i s/"#Port 22"/"Port 322"/ /etc/ssh/sshd_config
# if you want to change the port on a SELinux system, you have to tell SELinux about this change:
semanage port -a -t ssh_port_t -p tcp 322
# solving ERROR: Could not load host key: /etc/ssh/ssh_host_rsa_key
/usr/bin/ssh-keygen -A
netstat -tulpn
# committing d513e8dff620 container to a new named adrhc/centos7:v2 image:
docker commit -m "CentOS + epel" -a "adrhc" d513e8dff620 adrhc/centos7:v2
# or commit using the container's name (gloomy_goldstine) to a new named adrhc/centos7:v2 image:
docker commit -m "CentOS + epel" -a "adrhc" gloomy_goldstine adrhc/centos7:v2
# or commit last created container to a new named adrhc/centos7:v2 image:
docker commit -m "CentOS + epel" -a "adrhc" `docker ps -lq` adrhc/centos7:v2
# push an image to Docker Hub (see it at
docker push adrhc/centos7
# run the above commited image:
docker run -itP -v "$HOME/KIT":/adrhc/KIT -v /home/adrk/certs/:/adrhc/certs adrhc/centos7:v2 /bin/bash -> will create the container 3a63cfee66f4
# renaming 3a63cfee66f4 container created above
docker ps -a | grep 3a63cfee66f4
docker rename 3a63cfee66f4 my_centos7
# or rename last created container:
docker rename `docker ps -lq` my_centos7
# re-running the container 3a63cfee66f4
docker start 3a63cfee66f4
docker start my_centos7
# connecting to/bringing to front the running container
docker attach 3a63cfee66f4
docker attach my_centos7
# detach (see!msg/docker-user/nWXAnyLP9-M/kbv-FZpF4rUJ)
docker run -i -t → can be detached with ^P^Q and reattached with docker attach
docker run -i → cannot be detached with ^P^Q; will disrupt stdin
docker run → cannot be detached with ^P^Q; can SIGKILL client; can reattach with docker attach
# stopping a running container
docker stop my_centos7
# get the IP address of the running my_centos7 container
docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' my_centos7
# remove container
docker rm 3a63cfee66f4
# or by name
docker rm my_centos7
# remove image
docker images; docker ps -a
docker rmi 143d6907480f
docker rmi -f 143d6907480f -> removes related containers too
# connect using ssh to the container named my_centos7
# make sure the container exposes desired ports (
ssh -p 322 root@`docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' my_centos7`



The Alert Icon is part of the up2date application, which enables you to easily install system updates.

You may use any of the following formats to specify a package in a yum operation: name, name.architecture, name-version, name-version-release, name-version-release.architecture, and epoch:name-version-release.architecture.

The system-config-packages utility can ONLY be used if you do not update any packages and install only from the CD/DVD. This utility will fail if updates have been done.

Packages repository cache directories: /var/cache/yum/.

Put user specific aliases and functions in ~/.bashrc
# Aliases (linux commands + software)
alias free='free -m'
alias grep='grep --color'
alias grepx='grep -n --color'
alias ls='ls -hv --color --show-control-chars --group-directories-first'
alias ll='ls -hlv --color --show-control-chars --group-directories-first'
alias nethogs='nethogs egiga0'
alias psa='ps -e -o pid,pcpu,pmem,rss,ni,state,stat,start,time,nlwp,comm,cmd'
alias tailn='tail -f nohup.out'
alias tailf='tail -f'

To install the package tsclient, enter the command:
su -c 'yum install tsclient'
To install the package group MySQL Database, enter the command:
su -c 'yum groupinstall "MySQL Database"'
When you install a service, CentOS does not activate or start it. To configure a new service to run on bootup, choose Desktop->System Settings->Server Settings->Services, or use the chkconfig and service command-line utilities.

To update the tsclient package to the latest version, type:
su -c 'yum update tsclient'

To remove the tsclient package from your system, use the command:
su -c 'yum remove tsclient'
To remove all of the packages in the package group MySQL Database, enter the command:
su -c 'yum groupremove "MySQL Database"'

To search for a specific package by name, use the list function:
su -c 'yum list tsclient'
To view details about a package:
su -c 'yum info tsclient'
To search for version 0.132 of the application, use the command:
su -c 'yum list tsclient-0.132'
Use the standard wildcard characters to run any search option with a partial word or name: ? to represent any one character, and * to mean zero or more characters. Always add the escape character (\) before wildcards. To list all packages with names that begin with tsc, type:
su -c 'yum list tsc\*'

The search option checks the names, descriptions, summaries and listed package maintainers of all of the available packages:
su -c 'yum search mysql'

The provides function checks both the files included in the packages and the functions that the software provides. This option requires yum to download and read much larger index files than with the search option:
To search for all packages that include files called libneon, type:
su -c 'yum provides libneon'
To search for all packages that either provide a MTA (Mail Transport Agent) service, or include files with mta in their name:
su -c 'yum provides MTA'

CentOS systems automatically use the CentOS Project repositories. These include [base], [updates], [addons], [centosplus], [contrib], [extras], and [testing]. Some of these repositories are not enabled by default, because they might change core packages. The enabled repositories are [base], [updates], [addons], and [extras].
To perform a full system update, type this command:
su -c 'yum update'
To activate automatic daily updates, enter this command:
su -c '/sbin/chkconfig --level 345 yum on; /sbin/service yum start'

To add an extra repository, place a definition file in the /etc/yum.repos.d/ directory. The names of repository definition files end with .repo.
Default yum repositories are automatically used as up2date channels. If you want to add other channels for up2date, you must manually configure up2date to use these channels. To do so, edit the /etc/sysconfig/rhn/sources file.

To manually add a public key to your rpm keyring, use the import feature of the rpm utility. To import the file GPG-PUB-KEY.asc, type the following command:
su -c 'rpm --import GPG-PUB-KEY.asc'
to import the file GPG-PUB-KEY.asc on the web site use this command:
su -c 'rpm --import'

To purge the package data files, use this command:
su -c 'yum clean headers'
Run this command to remove all of the packages held in the caches:
su -c 'yum clean packages'

Enter this command to manually install the package tsclient-0.132-4.i386.rpm:
su -c 'yum localinstall tsclient-0.132-4.i386.rpm'

You can list yum groups with:
yum grouplist 
and for more (hidden) groups including the group ids, try ("group list" is equivalent to "grouplist"):
yum group list hidden | grep -i "X Window System"
To install Xfce group:
yum groups install "Xfce" 

Lists the repository ID, name, and number of packages it provides for each enabled repository:
yum repolist
# including disabled:
yum repolist all
# see also:
ll /etc/yum.repos.d
# enable a repository:
yum-config-manager --enable InstallMedia
# or edit repo file, e.g. (set enabled=1):
nano /etc/yum.repos.d/packagekit-media.repo

yum install yum-utils
# list files from mate-system-monitor.x86_64 package:
repoquery -l mate-system-monitor.x86_64

# list group content
yum group info "General Purpose Desktop"

# List installed packages with YUM
yum list installed

# list dependencies:
yum deplist openssl.x86_64
repoquery --requires --resolve git2u-core -> shows required packages
repoquery --requires git2u-core -> shows required files from packages required

# List the files in git2u-core-2.10.0-1.ius.el6.x86_64.rpm requiring
rpm -q --filerequire -p git2u-core-2.10.0-1.ius.el6.x86_64.rpm | grep

# List packages on which git2u-core-2.10.0-1.ius.el6.x86_64.rpm package depends:
rpm -q --requires -p git2u-core-2.10.0-1.ius.el6.x86_64.rpm

# install a package with no dependencies checking:
rpm -i --nodeps git2u-core-2.10.0-1.ius.el6.x86_64.rpm
# you then may continue to install depending packages with yum as usual:
yum install git2u-gitweb

# list repository content
yum list available | grep 'case sensitive repository id here'

# search for gcc package
yum list \*gcc\*
# install gcc
yum install gcc

# add a repository
yum-config-manager --add-repo

Asrock N3150DC-ITX

Windows with High Precision Event Timer (HPET)
I installed windows 7 with HPET disabled in BIOS.

Then if you do:
bcdedit /set useplatformclock true -> with HPET enabled windows won't boot
bcdedit /deletevalue useplatformclock -> disables HPET from an administrative cmd console

When HPET is enabled in BIOS but disabled in Windows, then Windows will run slower than usual.

Angularjs project setup (NODE & YEOMAN)

# see also

# general setup
sudo ln -s /usr/bin/nodejs /usr/bin/node	-> when node file is not found
sudo npm install -g yo bower grunt-cli generator-karma generator-angular
# see
# npm help list
# sudo npm list -gp grunt-wiredep@3.0.1
sudo npm list -gp --depth=0
# in project's path immediately after checkout from svn/git
npm install
bower install
# bower cache list, clean
bower cache list
bower cache clean


# npm cache data directory
npm config get cache
# npm cache list, clean
npm cache ls
npm cache clean

# list local node modules (project/node_modules)
npm ls --depth=0
# list global node modules (/usr/lib/node_modules)
npm ls -gp --depth=0

# install global package, specific version
sudo npm install -g webpack@2.2.0-rc.4

# uninstall global package (don't specify the version)
sudo npm uninstall -g -verbose webpack-dev-server

npm search angular-in-memory-web-api

# show the dependencies of the typings package at version 1.4.0 (or just check its package.json file)
npm view typings@1.4.0 dependencies

	npm WARN retry will retry, error on last attempt: Error: certificate has expired
	npm config set strict-ssl false

Working with GIT

# -> best visual guide
# -> best interactive guide
# -> graphical clients listed

# show diff for last commit only
git log -p HEAD -1
git log -p -n 1
git log -p HEAD~1..HEAD

# --graph draw a text based graph of the commits on the left hand side of the commit messages
# --decorate adds the names of branches or tags of the commits that are shown
git log --full-history --all --graph --color --oneline --date-order
git log --full-history --all --graph --color --date-order --pretty=format:"%x1b[31m%h%x09%x1b[32m%d%x1b[0m%x20%s"

# view changesets even though they are not referenced by any branch or tag
git reflog

# shows which files were altered and the relative number of lines added or deleted from each of them
git log --stat
git log --stat -1
git log --stat 57a6586c53bede7334d1be67b40e3c4f9cb63af9 -n 1
gitk --all -> GUI graph (run it from .git parent directory)

# shows both remote and local branches
git branch -a
# shows local branches
git branch
# shows remote branches
git branch -r

# restore file test4.txt to match the current HEAD
git reset HEAD test4.txt
git checkout -- test4.txt

# commit directly without first staging
git commit -m 'comment' -- README.txt

# display the information about the source of the clone:
git config --get remote.origin.url
git remote -v
git remote show origin

# git + ssh on port 222 (other than default 22) with ssh user gigi
git clone ssh://adrhc222/********/temp/git-try
# in ~/.ssh/config (create if not exists) put:
Host adrhc222
    Port 222
    User gigi
    ServerAliveInterval 30

# view commits on origin only
git fetch origin
git branch -a
* master
  remotes/origin/HEAD -> origin/master
git log --full-history --all --graph --color --pretty=format:"%x1b[31m%h%x09%x1b[32m%d%x1b[0m%x20%s" master..origin/master
git log --full-history --all --graph --color --pretty=format:"%x1b[31m%h%x09%x1b[32m%d%x1b[0m%x20%s" origin/readme-edits~1..origin/master

# Caching your GitHub password in Git
# see
git config --global credential.helper cache
git config --global credential.helper 'cache --timeout=3600'
git config --global credential.helper 'cache --timeout=31536000'

# create a “shared” repository which allows anyone with “group write” permissions on the folder to push into the repository
mkdir gitProject.git	-> the .git suffix is a good practice for --bare repositories
cd gitProject.git
git init --bare --shared
# see -> post-update
cp -v hooks/post-update.sample hooks/post-update

# generation of info/refs and objects/info/packs files
cd test-bare-repository.git
man git-update-server-info
cat info/refs
cat objects/info/packs

# remove remote branch
git push origin --delete development

# create locally a project then upload to remote:
rm -rf .git
git init
git add .
git commit -m "Initial commit"
git remote add origin
# -u option determines the creation of the remote branch master
git push -u origin master

# /etc/gitconfig
git config --system --list

# global configuration
git config --global --list
git config --global --edit -> at top you'll see the file's path (e.g. $HOME/.gitconfig)

# in a project
git config --list

# for CRLF see core.autocrlf variable
# exists in $HOME/.gitconfig
git config --global core.autocrlf input
git config --global --get core.autocrlf

# workflow example:
git clone
git checkout master
git remote add upstream
git remote -v
sudo npm install webpack-dev-server rimraf webpack -g
npm install
git fetch upstream
git merge upstream/master

# create a lightweight tag for HEAD
git tag "tag-name-here" HEAD
# list tags
git tag -n
# list tags for current branch
git describe --tags --abbrev=0
# push all tags
git push origin --tags
# push one tag
git push origin "tag-name-here"
# get all new tags
git fetch --all
# checkout tag named n3.2.2
git checkout tags/n3.2.2

# copy/duplicate repository
git clone --bare
cd old-repository.git
git push --mirror

	error: Your local changes to the following files would be overwritten by merge:
	Please, commit your changes or stash them before you can merge.
	git stash
	git stash pop

	GitCommandFailedException: GitCommandFailedException: Command 'fetch' failed in /ffp/opt/couchpotato (128):
	fatal: unable to access '': SSL certificate problem: unable to get local issuer certificate
	#ignore ssl certificate
	git config --system http.sslverify false -> set in /etc/gitconfig
	#or use the Mozilla's CA Bundle certificate:
	wget -nv -O /ffp/etc/ssl/cert.pem
	git config --system http.sslcainfo /ffp/etc/ssl/cert.pem
	# export site certificate chain
	cd $HOME
	openssl s_client -connect -showcerts </dev/null 2>/dev/null | openssl x509 -outform PEM >
	openssl x509 -in -out
	# install site certificate chain
	su root
	root@gigi:~# cat >> /etc/ssl/certs/ca-certificates.crt
	/etc/ssl/certs/ca-certificates.crt is the result of "curl-config --ca"
	sudo bash -c "echo -n | openssl s_client -showcerts -connect 2>/dev/null | sed -ne '/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p' >> /etc/ssl/certs/ca-certificates.crt"
Remove last commit from remote repository (e.g. origin)
git push origin +HEAD^:master
git reset --hard origin/master
This only works if the remote repository has the configuration:
        denyNonFastForwards = false

Use master branch for angular2-websocket library in package.json
    "angular2-websocket": "",
    "angular2-websocket": "git+",

Install own private repository
npm install "git+" --save generates in package.json:
"angular2-websocket": "git+"
npm install "git+" --save generates in package.json:
"angular2-websocket": "git+"

Force update from git when running npm install
npm install "git+" --force

Fork after already cloning
git clone
git remote -v
git remote rm origin
git remote add upstream
git remote add origin
git branch --set-upstream-to=origin/master
git config core.autocrlf input -> linux only
git fetch --all
git pull upstream master

Checkout a remote branch
git branch -a
* master
  remotes/origin/HEAD -> origin/master

git branch --track frames remotes/origin/frames
git checkout frames

Checkout all remote branches
git branch -a
for b in `git branch -r | grep -v -- '->'`; do echo "${b##origin/}"; done
for b in `git branch -r | grep -v -- '->'`; do git branch --track ${b##origin/} $b; done
# for already existing branches will fail with no undesirable consequences, e.g.:
fatal: A branch named 'master' already exists.

Show which remote branch is tracked by which local one
git branch -vv
git status -sb
git status | grep 'Your branch is up-to-date with'

Create a local branch then the remote counterpart
git refactor maven-modules    -> using
git push -u origin 'refactor/maven-modules'

Create mysql system db

# see also

#CREATE SYSTEM DB (first step and mandatory before using mysql)
export SRVPATH=/ffp/opt/srv
mkdir -p $SRVPATH/mysql/innodb/
mkdir -p $SRVPATH/mysql/innodblogdir/
mkdir -p $SRVPATH/mysql/binlog/
mkdir -p $SRVPATH/mysql/log/
mkdir -p $SRVPATH/mysql/tmp/
mkdir -p $SRVPATH/mysql/data
cd /ffp
# use your my.cnf otherwise mysql won't create some innodb tables (see
scripts/mysql_install_db --user=root --datadir=/ffp/opt/srv/mysql/data --defaults-file=/ffp/etc/my.cnf
# if you don't have a my.cnf then run:
#scripts/mysql_install_db --user=root --datadir=/ffp/opt/srv/mysql/data
#Later (see mysql-five-tables-5.6.25.sql below) you'll create innodb missing tables.
cd ~ && /ffp/start/ start
mysql -p -> the default password is empty
GRANT ALL ON *.* TO 'root'@'localhost' IDENTIFIED BY 'xxx321' WITH GRANT OPTION;
mysqladmin -u root password "xxx321"

#1. create databases and grant privileges
mysql -p
CREATE DATABASE exifweb CHARACTER SET utf8 COLLATE utf8_unicode_ci;
CREATE DATABASE wordpress CHARACTER SET utf8 COLLATE utf8_unicode_ci;
CREATE DATABASE owncloud702 CHARACTER SET utf8 COLLATE utf8_unicode_ci;
GRANT ALL ON exifweb.* TO 'exifweb'@'%' IDENTIFIED BY 'exifweb' WITH GRANT OPTION;
GRANT ALL ON wordpress.* TO 'wordpress'@'%' IDENTIFIED BY 'wordpress' WITH GRANT OPTION;
GRANT ALL ON owncloud702.* TO 'owncloud702'@'%' IDENTIFIED BY 'owncloud702' WITH GRANT OPTION;

#2. prepare sql for databases to restore
cp /i-data/md0/seagate-ext4/ProjectsNew/nsa310-config/trunk/mysql-db-design/*.sql.gz $HOME/temp/mysql-restore
cd $HOME/temp/mysql-restore
gunzip exifweb.sql.gz
gunzip wordpress.sql.gz
gunzip owncloud702.sql.gz
gunzip ghost.sql.gz
gunzip pydio.sql.gz
ls -l *.sql

#3. restore DBs
# The config variable max_allowed_packet must be larger than the imported sql script!
# Take five-tables.sql from me (it's a copy of the original):
# or take it from the original post:
mv 'file.php?id=19725&bug_id=67179' mysql-five-tables-5.6.25.sql
# this solves the innodb missing tables problem:
mysql -p mysql < mysql-five-tables-5.6.25.sql
# now import your databases:
mysql -u root -p exifweb < nsa310-config-trunk/mysql-db-design/exifweb.sql
mysql -u root -p wordpress < nsa310-config-trunk/mysql-db-design/wordpress.sql
mysql -u root -p owncloud702 < nsa310-config-trunk/mysql-db-design/owncloud702.sql
mysql -u root -p ghost < ghost.sql
mysql -u root -p pydio < pydio.sql
mysqlcheck -A --password=xxx321 --auto-repair

#UPGRADE DB schema
#If you use InnoDB, consider setting innodb_fast_shutdown to 0 before shutting down and upgrading your server.
/ffp/start/ stop
sed -i s/"innodb_fast_shutdown\s*=\s*1"/"innodb_fast_shutdown = 0"/ /ffp/etc/my.cnf
grep innodb_fast_shutdown /ffp/etc/my.cnf
/ffp/start/ start
/ffp/start/ stop
/ffp/start/ start
mysql_upgrade -u root -p --socket=/ffp/var/run/mysql/mysql.sock -v
/ffp/start/ stop
sed -i s/"innodb_fast_shutdown\s*=\s*0"/"innodb_fast_shutdown = 1"/ /ffp/etc/my.cnf
grep innodb_fast_shutdown /ffp/etc/my.cnf
/ffp/start/ start