Ubuntu Bionic: HD Graphics 520 i915 configuration

/etc/default/grub

GRUB_DEFAULT=0
GRUB_TIMEOUT_STYLE=hidden
GRUB_TIMEOUT=10

# i915 tuning for HD Graphics 520: RC6 power states, GuC firmware
# loading/submission, framebuffer compression, panel self refresh.
# NOTE(review): intel_pstate appears twice with different values
# ("skylake_hwp" and "enable") — neither is a documented intel_pstate
# option; verify against Documentation/admin-guide/pm/intel_pstate.rst.
GRUB_CMDLINE_LINUX_DEFAULT="noplymouth intel_pstate=skylake_hwp i915.enable_rc6=1 i915.enable_guc=3 i915.enable_fbc=1 i915.semaphores=1 nvme_load=YES intel_pstate=enable i915.enable_psr=1 i915.disable_power_well=0"
# GRUB_CMDLINE_LINUX="elevator=deadline"

# Uncomment to disable graphical terminal (grub-pc only)
GRUB_TERMINAL=console

# you can see them in real GRUB with the command `vbeinfo'
# GRUB_GFXMODE=1024x768x16
# NOTE(review): "1900x1080" looks like a typo for 1920x1080, and 8-bit
# color depth is unusually low — confirm the intended mode via `vbeinfo`.
GRUB_GFXPAYLOAD_LINUX=1900x1080x8

xorg.conf

# Intel DDX ("intel") driver configuration for HD Graphics 520.
Section "Device"
Identifier  "Intel Graphics"
Driver      "intel"
# DRI3 buffer sharing; hardware rotation and tiling paths enabled.
Option      "DRI" "3"
Option      "HWRotation" "true"
Option      "Tiling" "true"
# Do not block buffer swaps on vblank (lower latency, possible tearing).
Option      "SwapBuffersWait" "0"
Option      "ReprobeOutputs" "true"
Option      "VSync" "true"
# Option      "TearFree" "true"
# Option      "AccelMode" "sna"
EndSection
# World read/write access to the DRI device nodes.
# NOTE(review): this section is conventionally written Section "DRI";
# verify your server version accepts the lowercase spelling.
Section "dri"
Mode 0666
EndSection

List supported kernel mod options

modinfo -p i915

To check which options are currently enabled, run

systool -m i915 -av

If editing /etc/modprobe.d/* don’t forget to update the ramdisk! Or use kernel params in GRUB.

Kernel Housekeeper Update Script

I use that script for Kernel Housekeeping since I’m mostly on mainline kernel. The script is currently used with Ubuntu Bionic Beaver.

#!/bin/bash

# Version comparison helpers built on `sort -V` (natural version order).
# Each takes two version strings and succeeds when the relation holds.
function version_gt() { test "$(printf '%s\n' "$@" | sort -V | head -n 1)" != "$1"; }
function version_le() { test "$(printf '%s\n' "$@" | sort -V | head -n 1)" == "$1"; }
function version_lt() { test "$(printf '%s\n' "$@" | sort -rV | head -n 1)" != "$1"; }
# Fixed: the original compared "$1" against the maximum of the
# reverse-sorted pair, which actually implements ">=" — e.g.
# `version_eq 1.1 1.0` wrongly succeeded. Versions are equal exactly
# when the minimum and maximum of the sorted pair coincide.
function version_eq() { test "$(printf '%s\n' "$@" | sort -V | head -n 1)" == "$(printf '%s\n' "$@" | sort -V | tail -n 1)"; }
#if version_gt $LATEST_KERNEL_VERSION_SHORT $CURRENT_KERNEL_VERSION_SHORT; then
#   echo "$LATEST_KERNEL_VERSION_SHORT is greater than $CURRENT_KERNEL_VERSION_SHORT"
# fi

# Preserve the caller's field separator; the script later mutates IFS
# (see the /boot partition loop) and never restores it — NOTE(review).
OLD_IFS="$IFS"

# Packages that must never be auto-removed; extended below with the
# generic and currently running kernel versions.
IGNORE_PACKAGES="linux-headers-generic|linux-image-generic"
# Fetch the mainline-PPA directory listing (sorted by mtime, ascending).
DUMP="$( curl -s "http://kernel.ubuntu.com/~kernel-ppa/mainline/?C=M;O=A" )"
HAS_INTERNET=0
# "$?" is curl's status: a plain assignment propagates the exit status
# of its command substitution.
if [ "$?" -eq "0" ] && [ -n "$DUMP" ]; then
# Strip HTML tags, keep leading "vX.Y.Z" entries, drop release
# candidates, take the newest, remove the leading "v".
LATEST_KERNEL_VERSION="$( echo "$DUMP" | sed 's/<\/*[^>]*>//g' | grep -E -o "^v[^/]+" |  sort -V  | grep -v '\-rc' | tail -1 | sed -r 's/^v//g' )"
# Normalize to the first three dot-separated components, e.g. "4.17.2".
LATEST_KERNEL_VERSION_SHORT="$( echo $LATEST_KERNEL_VERSION | tr '.-' '.' | cut -d. -f1-3 )"
HAS_INTERNET=1
else
# NOTE(review): LATEST_KERNEL_VERSION_SHORT stays unset on this path;
# later echoes print it as an empty string.
LATEST_KERNEL_VERSION="Unable to resolve"
fi

# Test if we have a generic kernel version
# Query dpkg for an installed (state "ii") linux-image-generic package.
GENERIC_KERNEL_VERSION=$( dpkg-query -W -f'${db:Status-Abbrev} ${Package} ${Version}\n'  | grep -e '^ii' | grep 'linux-image-generic' | awk '{ print $3 }'  )
echo "Generic Kernel Version"
echo "======================"
if [ -n "$GENERIC_KERNEL_VERSION" ]; then
# Shorten e.g. "4.15.0-23.25" to "4.15.0".
GENERIC_KERNEL_VERSION_SHORT="$( echo $GENERIC_KERNEL_VERSION | tr '.-' '.' | cut -d. -f1-3 )"
echo "$GENERIC_KERNEL_VERSION_SHORT ($GENERIC_KERNEL_VERSION)"
# Protect every package belonging to the generic kernel from removal.
IGNORE_PACKAGES="$IGNORE_PACKAGES|linux-.*-$GENERIC_KERNEL_VERSION_SHORT-"
# echo $IGNORE_PACKAGES;
else
echo "Not installed".
fi
echo

# Running kernel, e.g. "4.17.2-041702" from "4.17.2-041702-generic".
CURRENT_KERNEL_VERSION=$( uname -r | cut -d- -f1,2 )
CURRENT_KERNEL_VERSION_SHORT="$( echo $CURRENT_KERNEL_VERSION | tr '.-' '.' | cut -d. -f1-3 )"
echo Current Kernel
echo ==============
echo "$CURRENT_KERNEL_VERSION ($CURRENT_KERNEL_VERSION_SHORT)"
echo

echo "Latest Kernel (stable)"
echo "======================"
echo "$LATEST_KERNEL_VERSION ($LATEST_KERNEL_VERSION_SHORT)"
echo

# Never remove packages belonging to the kernel we are running on.
IGNORE_PACKAGES="$IGNORE_PACKAGES|linux-.*$CURRENT_KERNEL_VERSION-"

# TODO add /boot/efi support
echo "Partitions "
echo ==========
# Colon-separated list of mount points containing "/boot" (this also
# matches /boot/efi, which is skipped further below).
BOOT_PARTITIONS="$( df -h --output=target | grep "/boot" | tr '\n' ':' )"
if [ -z "$BOOT_PARTITIONS" ]; then
echo "No special partitions."
else
# NOTE(review): IFS is changed here and never restored; the rest of the
# script implicitly relies on it staying ':' (see the package section).
IFS=':'
for boot_partition in $BOOT_PARTITIONS; do
boot_part_total=$( df $boot_partition -h --output=size | tail -n 1 | tr -d '[:space:]' )
boot_part_avail=$( df $boot_partition -h --output=avail | tail -n 1 | tr -d '[:space:]' )
echo
echo "$boot_partition size=$boot_part_total, free=$boot_part_avail"
# Installed kernels

# Skip EFI system partitions: the test succeeds only when the mount
# point does NOT contain "efi".
if [ ! "$( echo $boot_partition | grep efi > /dev/null 2>&1; echo $? )" -eq 0 ]; then
# Kernel versions present on the partition, e.g. "4.17.2" out of
# "vmlinuz-4.17.2-...-generic", colon-separated.
INSTALLED="$( test -r ${boot_partition} && ls -tr ${boot_partition}/{vmlinuz,initrd.img}-* | cut -d- -f2 | sort | uniq | tr '\n' ':'  )"
IFS=':'
for version in $INSTALLED; do
echo -n " $version taking "
# Sum the on-disk size of every file belonging to this version.
SIZE=$( cd $boot_partition; ls -1  | grep "\-${version}-" | xargs du -shc | tail -1 | cut -f 1 )
echo $SIZE
done
fi;
done;
fi
echo

echo "Installed packages"
echo "=================="
echo

# All installed kernel-related packages, version-sorted, one per line.
ALL_INSTALLED_PACKAGES=$( dpkg-query -W -f'${db:Status-Abbrev} ${Package} ${Version}\n'  | grep -e '^ii' | grep -e 'linux-image\|linux-signed-image\|linux-headers' | awk '{print $2}' | sort -V  )
# NOTE(review): this unquoted expansion keeps its newlines only because
# IFS is still ':' from the partition loop above — fragile; quoting the
# variable would make it independent of the earlier IFS change.
echo $ALL_INSTALLED_PACKAGES
echo

echo "Removeable packages"
echo "==================="
echo

# config files
# Packages left in "rc" state (removed but config files remain).
REMOVEABLE_PACKAGES="$( dpkg-query -W -f'${db:Status-Abbrev} ${Package} ${Version}\n'  | grep -e '^rc' | grep -e 'linux-image\|linux-signed-image\|linux-headers' | awk '{print $2}' | sort | grep -v -E "$IGNORE_PACKAGES" )"

# For each package family, mark every version OLDER than the running
# kernel: the awk program prints lines until it reaches the entry for
# the current kernel, then exits.
for tag in "linux-image" "linux-headers" "linux-signed-image" "linux-image-extra"; do
# echo c="$tag-$CURRENT_KERNEL_VERSION-";
packages_to_be_removed="$( echo $ALL_INSTALLED_PACKAGES | grep $tag | sort -V | awk 'index($0,c){exit} //' c="$tag-$CURRENT_KERNEL_VERSION" | grep -v -E "$IGNORE_PACKAGES" )"
REMOVEABLE_PACKAGES="$( echo -e "$REMOVEABLE_PACKAGES\n$packages_to_be_removed" | sed '/^$/d' | sort -V | uniq )"
done;

# Purging packages needs dpkg/apt privileges; bail out for non-root.
if ! [ $(id -u) = 0 ]; then
echo "You need to be root! Aborting..."
exit 1
fi

if [ -z "$REMOVEABLE_PACKAGES" ]; then
echo "No packages to remove found!"
echo
else
echo $REMOVEABLE_PACKAGES
# Interactive confirmation before purging anything.
read -p "Remove (y/n)?" CHOICE
echo

case "$CHOICE" in
y|Y )
# Join the package list into one apt-get invocation.
CMD="apt-get remove --purge $( echo $REMOVEABLE_PACKAGES | tr '\n' ' ' )"
# echo $CMD
eval "$CMD"
apt-get autoremove
echo
;;
* ) ;;
esac
fi

##
# Install packages from latest kernel
# Downloads the latest mainline kernel .debs for this architecture and
# installs them, after an interactive confirmation.
if [ $HAS_INTERNET == 1 ]; then

echo "Kernel upgrade"
echo "=============="
echo

# Skip the whole download when an image for the latest version exists.
LATEST_INSTALLED=$( echo $ALL_INSTALLED_PACKAGES | grep "linux-image-$LATEST_KERNEL_VERSION-" )
if [ -n "$LATEST_INSTALLED" ]; then
echo "$LATEST_KERNEL_VERSION already installed. No need to update."
echo
else
# Scrape the .deb file names from the mainline-PPA directory listing.
PACKAGES="$( curl -s "http://kernel.ubuntu.com/~kernel-ppa/mainline/v$LATEST_KERNEL_VERSION_SHORT/" |  grep -o -E 'linux-[_[:alnum:]\.\-]+.deb' | sort | uniq )"
# echo $PACKAGES
MACHINE_TYPE=`uname -m`
if [ ${MACHINE_TYPE} == 'x86_64' ]; then
ARCH="amd64"
else
ARCH="i386"
fi
# Keep packages for our architecture (or "all"); skip lowlatency flavour.
INSTALL_PACKAGES="$( echo $PACKAGES | grep -E "_($ARCH|all)" |  grep -v lowlatency )"
if [ -z "$INSTALL_PACKAGES" ]; then
echo "No packages to install found. Check your script for URL errors!" 1>&2
exit 1
fi

echo "Following packages will be installed:"
echo $INSTALL_PACKAGES
echo
read -p "Continue (y/n)?" CHOICE
echo

# Fixed: the answer was previously read but never evaluated, so the
# download and install proceeded no matter what the user typed.
case "$CHOICE" in
y|Y ) ;;
* )
echo "Aborted by user."
exit 0
;;
esac

# Download into a throw-away directory; remove it again on any exit.
TMP=$( mktemp -d --suffix=.kernel-$LATEST_KERNEL_VERSION )
trap 'rm -rf -- "$TMP"' EXIT
cd "$TMP" || exit 1

IFS=$'\n'
for package in $INSTALL_PACKAGES; do
echo -n Downloading $package...
# NOTE(review): the listing above uses v$LATEST_KERNEL_VERSION_SHORT
# while the download uses v$LATEST_KERNEL_VERSION — confirm both resolve
# to the same directory for versions with fewer than three components.
url="http://kernel.ubuntu.com/~kernel-ppa/mainline/v$LATEST_KERNEL_VERSION/$package"
wget  -q "$url"
if [ "$?" -eq 0 ]; then
echo done.
else
echo failed. aborting.
exit 1
fi
done
if [ -n "$INSTALL_PACKAGES" ]; then
dpkg -i *.deb
fi
fi
fi

Away with grep! Use ripgrep!

I used ack for some time for grepping source code, yet I moved on to ripgrep since it offers quite a bit more than grep and source-code greppers like SilverSearcher and Co. It can mostly replace grep and is often about 6x faster (written in Rust). I am still astonished when I really get results instantly.

See here:
https://blog.burntsushi.net/ripgrep/
https://github.com/BurntSushi/ripgrep

This should be really installed on every server where you have to deal with source code.

Update Confluence Page by API

You can create your own API token here: https://id.atlassian.com/manage/api-tokens and live-update any information you want. The script basically creates an HTML file, pumps it via jq into a JSON file and uploads it.

#!/bin/bash
# Update Confluence page by API

# Strict mode
set -euo pipefail

# Target page coordinates; create an API token at
# https://id.atlassian.com/manage/api-tokens
PAGEID=602767382
SPACE="EL3"
# NOTE(review): credentials are hardcoded and passed on curl's command
# line, where they are visible via `ps` — consider --netrc or an env var.
AUTH="user@example.com:GETYOUROWNTOKENORNEVERKNOW"
API_URL="https://mycompany.atlassian.net/wiki/rest/api"

# Create temp dir
TMP=$( mktemp -d )

# Exit/TERM handler: remove the temporary working directory, if any.
shutdown() {
  [ ! -e "$TMP" ] || rm -fr "$TMP"
}
trap shutdown TERM EXIT

# We first need current page version for update with next-page version
# Confluence requires every content update to carry version+1, so fetch
# the page JSON and extract the current version number.
curl --silent --user ${AUTH} ${API_URL}/content/${PAGEID} > ${TMP}/current.json
VERSION=$( cat ${TMP}/current.json | jq '.version.number' )
NEXTVERSION=$( expr 1 + ${VERSION} )
echo Got Version: ${VERSION}

# Get information
# NOTE(review): placeholder — `create` is not a valid command and this
# line aborts the script under `set -e`; replace it with whatever
# actually generates ${TMP}/page.txt.
create page.txt

# Create HTML file
# Wrap the gathered text in <pre>, turning newlines into <br> so the
# Confluence storage format keeps the original line structure.
echo "

Date of creation: $( date --utc )
<pre>$( cat ${TMP}/page.txt | sed 's/$/<br\>/g' | tr -d '\n' )</br\></pre>
" > ${TMP}/page.html

# Prepare upload JSON with JQ
# jq -sR slurps the entire HTML file as one raw string and embeds it,
# properly JSON-escaped, into the page-update payload.
cat ${TMP}/page.html | jq -sR "@text | {\"id\":\"$PAGEID\",\"type\":\"page\",\"title\":\"Information Gathering\",\"space\":{\"key\":\"${SPACE}\"},\"body\":{\"storage\":{\"value\": . ,\"representation\":\"storage\"}},\"version\":{\"number\":${NEXTVERSION}}}"  > ${TMP}/upload.json

# Upload
# PUT the payload; -T streams the JSON file as the request body.
curl \
--silent \
--user ${AUTH} \
-X PUT -H 'Content-Type: application/json' \
-T ${TMP}/upload.json \
${API_URL}/content/${PAGEID} \
1>/dev/null

echo Updated Version: ${NEXTVERSION}

IP in VPN vs. LAN: alias IP address by iptables

Scenario: while you are at work you are on the LAN and use 192.168.x.x. But once you do home office you connect by VPN to the same DB and the IP changes to 10.x.x.x. And you don’t wanna change configs for your app 🙁

Using IP tables that can be worked around easily:

# Enable IP forwarding
sudo sh -c 'echo "1" > /proc/sys/net/ipv4/ip_forward'

# LAN IP
IP_LAN=192.168.3.38

# VPN IP
IP_VIRTUAL=10.8.4.38

ping -c 1 -W 1 $IP_VIRTUAL                                                 
PING 10.8.4.38 (10.8.4.38) 56(84) bytes of data.
64 bytes from 10.8.4.38: icmp_seq=1 ttl=63 time=124 ms

ping -c 1 -W 1 $IP_LAN
PING 192.168.3.38 (192.168.3.38) 56(84) bytes of data.

--- 192.168.3.38 ping statistics ---
1 packets transmitted, 0 received, 100% packet loss, time 0ms

sudo iptables -t nat -A PREROUTING -d $IP_LAN -j DNAT --to-destination $IP_VIRTUAL
sudo iptables -t nat -A POSTROUTING -j MASQUERADE

ping -c 1 -W 1 $IP_LAN
PING 192.168.3.38 (192.168.3.38) 56(84) bytes of data.
64 bytes from 192.168.3.38: icmp_seq=1 ttl=63 time=125 ms

Laptop Perfomance: irqbalancer vs. intel_pstate

Today I uninstalled irqbalancer and I had some more performance gain on my GNOME desktop.

The CPUfreq control panel showed me IRQBALANCE DETECTED and they say:

Why I should not use a single core for power saving

  • Moderns OS/kernel work better on multi-core architectures.
  • You need at least 1 core for a foreground application and 1 for background system services.
  • Linux Kernel switches between CPU cores to avoid overheating, CPU thermal throttling and to balance system load.
  • Many CPUs have Hyper-Threading (HT) technology enabled by default. So there is no reason to run half of a physical CPU core.

Very simply said. I miss some contradictions.

I’m not certain how the laptop drains battery running without intel_pstate=skylake_hwp set to disable. The pstate does make sense to me. Mainly I think it was irqbalancer. I have to do some stress testing later…

Infojunk July 2018

Better than VNC and TeamViewer – NoMachine and the NX protocol

The NX protocol is basically a successor to the X protocol and very nice for streaming. NoMachine implements that and is a better VNC with video streaming and a nice alternative to TeamViewer with good cross-platform capabilities. Also keyboard and mouse work cross-platform without any problems. This is an issue with most alternatives to TeamViewer. Yet TeamViewer still is number #1 for me.

https://www.nomachine.com/

In-depth:

In 2001, the compression and transport protocol NX was created to improve on the performance of the native X display protocol to the point that it could be usable over a slow link such as a dial-up modem. It wrapped remote connections inย SSHย sessions for encryption.

The NX scheme was derived from that of DXPC โ€“ the Differential X Protocol Compressor project. NX 1.x was released to the general public on February 14, 2003, the final version of ‘NX’ being 3.5. The last update of 3.5 was in 2012.

The core compression technology up until NX 3.5 was made available to the community under the GNUย GPL2ย license whilst other components such as the NX Server and NX Client programs were proprietary.

In 2009, Google made a freely available Open Source GPL2 version of the server called Neatx. Other open source variants of NoMachine’s NX are also available (see below).

Starting in 2013, with the release of version 4.0, NX technology became closed source.

https://en.wikipedia.org/wiki/NX_technology

 

Anti-Patterns and Mental Downers

Most known about anti-patterns. This is now more about the psychological side.

It happened to me that I thought there must be a release chat and tried to gather information about it, asked in the company chat — no one answered. Finally I was added two months late. Today I recognized I was forgotten when getting access to some shared Google Drive folder. But everyone thought I knew about the information that was shared within there. Because it is taken as obvious.

Perfectionism is good for your work? Nope! The psychological definition says “suffering from not being enough”, “unmeetable high standards”.

It’s even more of a downer when you are in a complex environment that always asks “when will it be ready?”, “I can’t imagine that this is such a big problem!”.

The opposite is conscientiousness. Do your work well, gather the information you need, communicate, be gentle. Care about the quality of human interactions and the quality of your work, and then the product will also be good.

A false consensus effect or pluralistic ignorance is very common among developers.

But most important is to know about cognitive biases and that others are humans, too:

Always look on the bright side of life! Evolve! I think that this is important!

Using confd and consul for config management

Consul is used in clouds as a key/value store to hold configs, just like etcd, Zookeeper, AWS SSM etc. Additionally Consul can provide a DNS server (service discovery).

In this tutorial we want to test out how we can create configs for our applications in a development stack by a local confd run. confd takes configuration templates and builds configs from the information provided by Consul.

The idea is that we just provide templates to our applications and by these templates generate our configurations from key/values stored on consul.

Advantages

Applications repositories defined the config-format
No more versionless configs (yet we need to get environments straight!)

And since it’s Consul, we’re one step closer to cloud-readiness

Example

Install confd

We wanna use local confd โ€“ one file โ€“ thanks to golang and easy install:

sudo wget -O /usr/local/bin/confd https://github.com/kelseyhightower/confd/releases/download/v0.16.0/confd-0.16.0-linux-amd64
sudo chmod a+x /usr/local/bin/confd

Run dockerized consul

Open up a new terminal and run consul kv exposed to localhost by port 8550 (in memory only!):

docker run --rm --name=dev-consul -p 8500:8500 -e CONSUL_BIND_INTERFACE=eth0 consul
Store some values
curl -X PUT -d 'db.example.com' http://localhost:8500/v1/kv/myapp/database/url
curl -X PUT -d 'Charles Brown' http://localhost:8500/v1/kv/myapp/database/user

# This is how you get values
curl -f http://localhost:8500/v1/kv/myapp/database/url?raw
curl -f http://localhost:8500/v1/kv/myapp/database/user?raw
curl -f http://localhost:8500/v1/kv/myapp/database/non-existent?raw
Add some config files to our working directory

conf.d/myconfig.toml

[template]
src = "myconfig.conf.tmpl"
dest = "myconfig.conf"
keys = [
"/myapp/database/url",
"/myapp/database/user",
]

templates/myconfig.conf.tmpl

[myconfig]
database_url = {{getv "/myapp/database/url"}}
database_user = {{getv "/myapp/database/user"}}
We now have the following directory structure:

Directory Structure

โ”œโ”€โ”€ conf.d
โ”‚ โ””โ”€โ”€ myconfig.toml
โ””โ”€โ”€ templates
โ””โ”€โ”€ myconfig.conf.tmpl

Create our configs by templates

$ confd -onetime -backend consul -node 127.0.0.1:8500 -confdir $(pwd)/ 
2018-06-07T13:42:41+02:00 refpad-16 confd[7689]: INFO Backend set to consul
2018-06-07T13:42:41+02:00 refpad-16 confd[7689]: INFO Starting confd
2018-06-07T13:42:41+02:00 refpad-16 confd[7689]: INFO Backend source(s) set to 127.0.0.1:8500
2018-06-07T13:42:41+02:00 refpad-16 confd[7689]: INFO Target config myconfig.conf out of sync
2018-06-07T13:42:41+02:00 refpad-16 confd[7689]: INFO Target config myconfig.conf has been updated
$ cat myconfig.conf 
[myconfig]
database_url = db.example.com
database_user = Charles Brown
$ echo xxx >> myconfig.conf
$ confd -onetime -backend consul -node 127.0.0.1:8500 -confdir $(pwd)/ 
2018-06-07T13:44:49+02:00 refpad-16 confd[8151]: INFO Backend set to consul
2018-06-07T13:44:49+02:00 refpad-16 confd[8151]: INFO Starting confd
2018-06-07T13:44:49+02:00 refpad-16 confd[8151]: INFO Backend source(s) set to 127.0.0.1:8500
2018-06-07T13:44:49+02:00 refpad-16 confd[8151]: INFO myconfig.conf has md5sum 7a30123886573e65b3c9d31a4e1c1abf should be e919f09ba963caad1051c212c5ca9453
2018-06-07T13:44:49+02:00 refpad-16 confd[8151]: INFO Target config myconfig.conf out of sync
2018-06-07T13:44:49+02:00 refpad-16 confd[8151]: INFO Target config myconfig.conf has been updated

Summary

There are some more advanced examples on the Consul GitHub. We aim for an infrastructure that is able to reconfigure while running. Notifications. Monitoring. Service discovery. These topics are not covered here.

Also there are better tools like remco or consul-template that support multiple backend sources and offer secret stores and better auth. Just dig into it.

Further readings
– How Should I Get Application Configuration into my Docker Containers?
– DigitalOcean: How To Use Confd and Etcd to Dynamically Reconfigure Services
– Consul security model and Hashicorpโ€™s Vault