Linux

Help! We’ve run into a DockerHub rate limit!

About

Yes, it is still happening. In 2025! Here you will find:

Podman Dockerhub Mirror Configuration

~/.config/containers/registries.conf.d/dockerhub-mirror.conf:

# Podman short-name mapping for "docker.io": pulls try the [[registry.mirror]]
# entries below in order; the [[registry]] "location" is the final fallback
# (see containers-registries.conf(5)).
[[registry]]
prefix = "docker.io"
insecure = false
blocked = false
location = "public.ecr.aws/docker"

[[registry.mirror]]
location = "mirror.gcr.io"

# GitLab dependency proxy acting as a DockerHub pull-through cache.
[[registry.mirror]]
location = "gitlab.com/acme-org/dependency_proxy/containers"

# DockerHub's real endpoint, tried before the last-resort location above.
[[registry.mirror]]
location = "registry-1.docker.io"

# Private ECR repo prefix — presumably an ECR pull-through cache; authenticated
# via the ecr-login credential helper (see auth.json below).
[[registry.mirror]]
location = "123456789012.dkr.ecr.us-east-1.amazonaws.com/docker-io"

I hope you are using ecr-login for your ECR registries ;)

export REGISTRY_AUTH_FILE=$HOME/.config/containers/auth.json
{
  "auths": {
    "docker.io": {
      "auth": "eGw4ZGVwXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXem40VQ=="
    },
    "gitlab.com": {
      "auth": "cmVXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXSYQ=="
    },
    "registry.gitlab.com": {
      "auth": "cmVXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXSYQ=="
    }
  },
  "credHelpers": {
    "*": "",
    "123456789012.dkr.ecr.us-east-1.amazonaws.com": "ecr-login",
    "345678901234.dkr.ecr.us-east-1.amazonaws.com": "ecr-login"
  }
}

K8s Quickfix: Rewriting Existing K8s Resources


$ cd $(mktemp -d)

# Dump all workload controllers plus still-Pending pods as JSONL
# (one JSON object per line) for the jq rewrite step below.
$ (
  kubectl get pods --field-selector=status.phase=Pending -A -ojson | jq -c '.items[]';
  kubectl get deployments -ojson -A | jq -c '.items[]';
  kubectl get replicasets -ojson -A | jq -c '.items[]';
  kubectl get daemonsets -ojson -A | jq -c '.items[]';
) > /tmp/cluster.jsonl

$ cat /tmp/cluster.jsonl \
  | jq -r '
    # Split an OCI image reference into host/port/path/image/tag/digest.
    # A registry host is only recognized when it contains a dot (e.g. "gcr.io").
    # Fix: the optional ":port" now sits INSIDE the host group, before the "/",
    # so "registry.example.com:5000/foo/bar" parses; previously the port group
    # came after the slash and could never match.
    def parse_into_parts:
      . as $i
      |capture(
        "^((?<host>[a-zA-Z0-9-]+\\.[a-zA-Z0-9.-]+)(:(?<port>[0-9]+))?/)?"
        + "((?<path>[a-zA-Z0-9-._/]+)/)?"
        + "(?<image>[a-zA-Z0-9-._]+)"
        + "((:(?<tag>[a-z0-9_.-]+))|(@(?<digest>sha256:[a-z0-9]+)))?$"
      ) // error("couldnt parse \($i)");

    # Fill in the registry defaults implied by DockerHub short names.
    # NOTE: "if ... then ... end" without else requires jq 1.7+.
    def qualify_oci_image:
      if (.host==null) then .host="docker.io" end
      |if (.path==null and .host=="docker.io") then .path="library" end
      # |if (.tag==null and .digest==null) then .tag="latest" end
      ;

    # Reassemble the parts into a full image reference.
    def glue_parts:
      [
        if (.host) then .host else "" end,
        if (.port) then ":\(.port)" else "" end,
        if (.host) then "/" else "" end,
        if (.path) then "\(.path)/" else "" end,
        .image,
        if (.digest) then "@\(.digest)" elif (.tag) then ":\(.tag)" else "" end
      ]|join("")
      ;

    # Rewrite docker.io (and legacy bitnami) references to the private ECR repo.
    # Fix: account ID matches the one used everywhere else (123456789012).
    def fix_oci_image:
      . as $i
      |parse_into_parts
      |qualify_oci_image
      |if (.path=="bitnami") then .path="bitnamilegacy" else . end
      |if (.host=="docker.io") then (.host="123456789012.dkr.ecr.us-east-1.amazonaws.com"|.path="docker-io/\(.path)") else . end
      |glue_parts;

    # Collect "name=newimage" pairs for containers whose image changes,
    # then emit one "kubectl set image" command per affected workload.
    [
      ..|objects|(.initContainers[]?,.containers[]?)
      |(.image|fix_oci_image) as $newImage
      |select(.image!=$newImage)
      |"\(.name)=\($newImage)"
    ] as $p
    |select($p|length > 0)
    |"kubectl set image \(.kind) -n \(.metadata.namespace) \(.metadata.name) \($p|join(" "))"
  '
    

Permanent Mirror Configuration for containerd

(
	# Patch containerd config for automatically picking the DockerHub mirrors.
	# The CRI plugin key was renamed between config schema v2 and v3.
	# NOTE(review): this assumes /etc/containerd/config.toml has an `imports`
	# entry covering /etc/containerd/config.d/ — confirm on the target host.

	containerd_config_version="$(grep -oP '^\s*version\s*=\s*\K\d+' /etc/containerd/config.toml)"
	p=""
	case "$containerd_config_version" in
		2) p="io.containerd.grpc.v1.cri";;
		3) p="io.containerd.cri.v1.images";;
		# 'return' is only valid inside a function; 'exit' leaves this subshell.
		*) echo "unsupported" >&2; exit 1;;
	esac
	cat <<-EOM >> /etc/containerd/config.d/dockerhub-mirrors.toml
[plugins]

  [plugins."$p".registry]

    [plugins."$p".registry.mirrors]

      [plugins."$p".registry.mirrors."docker.io"]
        endpoint = [
          "public.ecr.aws/docker",
          "mirror.gcr.io",
          "gitlab.com/acme-org/dependency_proxy/containers",
          "123456789012.dkr.ecr.us-east-1.amazonaws.com/docker-io",
          "docker.io",
        ]

    [plugins."$p".registry.configs]
      [plugins."$p".registry.configs."gitlab.com".auth]
        # https://gitlab.com/groups/acme-org/-/settings/access_tokens?page=1
        username = "dependency-proxy"
        password = "glpat-XXXXXXXXXXXXXXXXXXXX"

      [plugins."$p".registry.configs."docker.io".auth]
        username = "acme-org"
        password = "dckr_pat_3Xi_XXXXXXXXXXXXXXXXXXXXXXX"
        auth = "dckr_pat_3Xi_XXXXXXXXXXXXXXXXXXXXXXX"
EOM
)

# Sanity-check: bail out when containerd cannot parse its merged configuration.
containerd config dump 1>/dev/null || {
   echo "exiting since containerd config is bad" >&2
   exit 1
}

Craftsmanship And The Right Tools for Your Job

I wonder why my hardware was always superior to the one my companies provided me with. Shouldn’t they be interested in getting the best quality? Would I hire an electrician, and then forbid him to use his tools and give him my IKEA toolbox? – I wouldn’t!

A Software Craftsman’s tools are not mere instruments; they are his accumulated skill, capital, and tradition made tangible. I believe you have to care for your tools. Also, ★nix craftsmen often tend to solve problems with the capabilities of their systems.

Microsoft WSL2 kernel modifications

If you want to dig into it: for now it all seems to be Hyper-V related. I may be wrong, since I haven’t reviewed the code itself.

It’s based on the next 5.4.x kernel - probably since Ubuntu Focal also has 5.4 on LTS.

# Shallow-clone the matching upstream tag and the WSL2 kernel tree in
# parallel, then wait for both background clones before diffing them.
git clone --depth 1 --branch v5.4.51 https://github.com/gregkh/linux.git upstream & pid1=$!
git clone --depth 1 --branch linux-msft-5.4.51  https://github.com/microsoft/WSL2-Linux-Kernel.git wsl2 & pid2=$!
wait $pid1 $pid2
File differences
diff -qr --exclude=.git upstream wsl2 | tee diff.txt
# Extract changed files ("Files upstream/X and wsl2/X differ") and files that
# exist only in the WSL2 tree ("Only in wsl2/dir: name" -> "dir/name"),
# de-duplicate, and keep only paths still present in the wsl2 checkout.
cat  \
    <(grep -oP '(Files upstream/)\K[^ ]+' diff.txt) \
    <(grep -oP '(Only in wsl2/)\K.+' diff.txt | sed 's|: |/|g') \
    | sort -u \
    | (while read -r f; do if [[ -f "wsl2/$f" ]]; then echo "$f"; fi; done;) \
| tee files.txt
Get all commits on diffed files

Improvement: Diff the original commit list with WSL source

Techstack n - 1 is dead!

TL;DR TechStack n-1 is dead. It ended with the rise of the clouds and software release cycles going down to weeks due to containerized CIs.

Against ‘it’s stable and mature so let it run’

Death of Sophocles
The Death of Sophocles (Creative Commons)

Being open-source-based, Ubuntu already had the concept of point releases every 6 months when Docker and K8s hit the world and gave automated CIs a big boost in making system containers. Some years later, Docker itself switched to a 3-month release cycle. So did the Linux Kernel with 2–3 months. Firefox: 4 weeks.

Infojunk November 2018

This is a collection of interesting links and resources I came across in November 2018, covering topics such as security, Linux, AWS, and development.

Hacking / MITM-API-Testing

Linux

Windows

Python

KataCode

Spectre/Meltdown

Project

Security

Tools

AWS

Development

Other

Connect to GitLab via SSH

Start an SSH Agent

If you haven’t already done so, add the following command to your shell’s RC file (such as .bashrc or .zshrc) to start the ssh-agent:

$ eval $(ssh-agent)

Add Your Generated Key

Use the ssh-add command to add your private SSH key (assuming it is the default id_rsa file) to the agent:

$ ssh-add ~/.ssh/id_rsa

List Keys

You can list the keys currently loaded by the ssh-agent using the following command:

Infojunk October 2018

This is a collection of interesting links and resources I came across in October 2018, covering a wide range of topics including browser extensions, collaborative coding, Linux, AWS, and more.

Browser Extensions

Collaborative Coding

Focusing on IDEs. Web-based solutions are mostly ignored.

Linux

NodeJS

DevOps

AI/MachineLearning

AWS

JmesPath is not as powerful as jq, but Amazon AWS probably chose it since it might be faster and the query-selectors are a bit more sophisticated (?).

PulseAudio: Mono-Sink Audio

Just in case your 10,000+ employee corporation doesn’t plug in the microphone jack correctly and no one is allowed to ask questions (presentation-only).


Creating a Mono Audio Sink with PulseAudio

To force stereo audio output into a single mono channel, you can use the PulseAudio module module-remap-sink. This is often useful for presentations or when hardware is misconfigured (e.g., a microphone is plugged into an unbalanced stereo input, but only one channel is picked up).

AWS sync is not reliable!

While migrating from s3cmd to the AWS S3 CLI, I noticed that files did not reliably sync when using the AWS CLI.

I tested this behavior with different versions, and they all exhibited the same issue:


Test Setup

  1. Set up the AWS CLI utility and configure your credentials.

  2. Create a testing S3 bucket.

  3. Set up some random files:

    # Create the source directory plus 10 random files of 10MB each
    # (fix: 'multi/' was never created before dd wrote into it)
    mkdir -p multi
    for i in {1..10}; do dd if=/dev/urandom of=multi/part-$i.out bs=1MB count=10; done;
    # Then copy the first 5 files over
    mkdir -p multi-changed
    cp -r multi/part-{1,2,3,4,5}.out multi-changed
    # And replace the content in the remaining 5 files (6-10)
    for i in {6..10}; do dd if=/dev/urandom of=multi-changed/part-$i.out bs=1MB count=10; done;
    

Testing S3 sync with AWS CLI

Cleanup

$ aws s3 rm s3://l3testing/multi --recursive

Initial sync

$ aws s3 sync multi s3://l3testing/multi
upload: multi/part-1.out to s3://l3testing/multi/part-1.out       
upload: multi/part-3.out to s3://l3testing/multi/part-3.out     
upload: multi/part-2.out to s3://l3testing/multi/part-2.out     
upload: multi/part-4.out to s3://l3testing/multi/part-4.out     
upload: multi/part-10.out to s3://l3testing/multi/part-10.out   
upload: multi/part-5.out to s3://l3testing/multi/part-5.out     
upload: multi/part-6.out to s3://l3testing/multi/part-6.out     
upload: multi/part-8.out to s3://l3testing/multi/part-8.out     
upload: multi/part-7.out to s3://l3testing/multi/part-7.out     
upload: multi/part-9.out to s3://l3testing/multi/part-9.out

Update files

Only 5 files should now be uploaded. Timestamps for all 10 files should be changed.

Infojunk September 2018

This is a collection of interesting links and resources I came across in September 2018, covering topics like Kotlin, Python, Markdown, note-taking apps, web development, Linux, and more.

Kotlin

Python

Markdown Notetaking

Some notetaking apps you should give a try. At least Notion is very promising (yet you have to pay).

Connecting to CheckPoint VPN SNX in Linux

This guide explains how to connect to CheckPoint VPN using SNX (SSL Network Extender) on Linux systems.

Prerequisites

Ensure you have received the following information from your VPN administrator:

Please use that information to replace placeholders in scripts found in this tutorial.

Installation Script

You can either download from their website (crappy and frustrating) or get it directly via http://gateway-ip.

Connecting to Checkpoint QVPN SXN in Linux

This tutorial provides a step-by-step guide on how to connect to a Checkpoint QVPN on a Linux system. It covers prerequisites, installation of the snx client, and automation scripts for connecting and disconnecting.

Prerequisites

Ensure you have received their E-Mail and following information:

Please use that information to replace placeholders in scripts found in this tutorial.

Installation script

You can either download from their website (which can be frustrating) or get it directly via http://gateway-ip.

Infojunk August 2018

This is a collection of interesting links and resources I came across in August 2018, covering topics like Linux, Apache, hardware, coding, Python, and math.

Linux

It’s about responsiveness - not about the best performance!

Apache

Hardware

Coding

Python

Yes, a deep dive into Python. And I don’t like it. As well as PHP. Do Rust. Trust me!

Albert Launcher 0.14: Switch Application Window Plugin

This post introduces a Python extension for the Albert Launcher that allows you to switch between application windows.

Since I really don’t like the Switcher Plugin for GNOME and I’m a fan of Albert Launcher, I created this extension. Maybe it will get accepted into their python extensions.

Drop the following code to ~/.local/share/albert/org.albert.extension.python/switch-app-window.py or other provided locations and activate it within the extensions menu:

Albert Extension Menu
import re
import subprocess

from albertv0 import *

__iid__ = "PythonInterface/v0.1"
__prettyname__ = "Switch App Window"
__version__ = "1.0"
__trigger__ = "w "
__author__ = "Markus Geiger <mg@evolution515.net>"
__id__ = "window"
__dependencies__ = []

iconPath = iconLookup("go-next")


def handleQuery(query):
    """List windows reported by ``wmctrl -l`` whose title matches the query.

    Returns a list of Albert ``Item`` objects, each offering a "Focus" and a
    "Close" action for the matched window.
    """
    stripped = query.string.strip()
    # NOTE(review): this only bails out when the query is both untriggered AND
    # empty -- presumably so an empty "w " query still lists all windows;
    # confirm against Albert v0 query semantics.
    if not query.isTriggered and not stripped:
        return

    results = []
    process = subprocess.Popen(['wmctrl', '-l'], stdout=subprocess.PIPE, encoding='utf8')

    output, error = process.communicate()

    # wmctrl -l columns: <window-id> <desktop> <client-host> <title>.
    # Fix: the desktop number is -1 for sticky windows (see wmctrl(1)), so the
    # second group must allow a leading minus or such windows are skipped.
    patt = re.compile(r'^(\w+)\s+(-?\d+)\s+([^\s]+)\s+(.+)$')
    # Many titles look like "<document> - <application>".
    window_re = re.compile(r'^(.+)\s+-\s+(.+)$')

    for line in output.split('\n'):
        match = patt.match(line)
        if not match:
            continue

        window_id = match.group(1)
        fulltitle = match.group(4)
        # Case-insensitive substring filter on the full window title.
        if query.string.lower() not in fulltitle.lower():
            continue

        titlematch = window_re.match(fulltitle)

        if titlematch:
            windowtitle = titlematch.group(1)
            program_title = titlematch.group(2)
        else:
            program_title = fulltitle
            windowtitle = fulltitle

        results.append(
            Item(
                id="%s_%s" % (__id__, window_id),
                icon=iconPath,
                text=program_title,
                subtext=windowtitle,
                completion=query.rawString,
                actions=[
                    ProcAction("Focus", ["wmctrl", "-ia", window_id]),
                    ProcAction("Close", ["wmctrl", "-ic", window_id])
                ]
            )
        )
    return results

Kernel Housekeeper Update Script

This post provides a bash script for Linux kernel management, specifically for those using mainline kernels on Ubuntu Bionic Beaver. The script helps in removing old kernels and installing the latest stable version.

I use this script for Kernel Housekeeping since I’m mostly on the mainline kernel. The script is currently used with Ubuntu Bionic Beaver.

#!/bin/bash
# Kernel housekeeping: remove old mainline kernels, install the latest stable.

# Version comparison helpers based on `sort -V`.
# Fix: the originals were garbled by a publishing/escaping pass -- `tr \" \n`
# translated double quotes to the letter n instead of splitting the arguments
# onto separate lines. `printf '%s\n' "$@"` prints one argument per line.
# version_gt A B -> true when A > B
function version_gt() { test "$(printf '%s\n' "$@" | sort -V | head -n 1)" != "$1"; }
# version_le A B -> true when A <= B
function version_le() { test "$(printf '%s\n' "$@" | sort -V | head -n 1)" == "$1"; }
# version_lt A B -> true when A < B
function version_lt() { test "$(printf '%s\n' "$@" | sort -rV | head -n 1)" != "$1"; }
# NOTE(review): despite the name, this tests A >= B (A equals the maximum).
function version_eq() { test "$(printf '%s\n' "$@" | sort -rV | head -n 1)" == "$1"; }
# Example:
#   if version_gt $LATEST_KERNEL_VERSION_SHORT $CURRENT_KERNEL_VERSION_SHORT; then
#     echo "$LATEST_KERNEL_VERSION_SHORT is greater than $CURRENT_KERNEL_VERSION_SHORT"
#   fi

OLD_IFS="$IFS"

# Packages matching these patterns are never offered for removal.
IGNORE_PACKAGES="linux-headers-generic|linux-image-generic"
HAS_INTERNET=0
# Determine the newest stable mainline version from the kernel-PPA index.
# Fixes: the \" escaping leftovers broke the curl URL and the test brackets,
# and the old `[ "$?" -eq 0 ]` ran after an assignment, so it was always true.
if DUMP="$( curl -s 'http://kernel.ubuntu.com/~kernel-ppa/mainline/?C=M;O=A' )" && [ -n "$DUMP" ]; then
# Strip HTML tags, keep the "vX.Y.Z" directory names, skip -rc releases, take newest.
LATEST_KERNEL_VERSION="$( echo "$DUMP" | sed 's/<\/*[^>]*>\/*//g' | grep -E -o '^v[^/]+' |  sort -V  | grep -v '\-rc' | tail -1 | sed -r 's/^v//g' )"
LATEST_KERNEL_VERSION_SHORT="$( echo $LATEST_KERNEL_VERSION | tr '.-' '.' | cut -d. -f1-3 )"
HAS_INTERNET=1
else
LATEST_KERNEL_VERSION="Unable to resolve"
fi

# Test if we have a generic (distro) kernel installed.
# Fix: the `echo \"...\"` lines printed literal backslash-quotes -- leftovers
# from an escaping pass; restored to plain quoting throughout.
GENERIC_KERNEL_VERSION=$( dpkg-query -W -f'${db:Status-Abbrev} ${Package} ${Version}\n'  | grep -e '^ii' | grep 'linux-image-generic' | awk '{ print $3 }'  )
echo "Generic Kernel Version"
echo "======================"
if [ -n "$GENERIC_KERNEL_VERSION" ]; then
GENERIC_KERNEL_VERSION_SHORT="$( echo $GENERIC_KERNEL_VERSION | tr '.-' '.' | cut -d. -f1-3 )"
echo "$GENERIC_KERNEL_VERSION_SHORT ($GENERIC_KERNEL_VERSION)"
# Never offer packages of the distro kernel for removal.
IGNORE_PACKAGES="$IGNORE_PACKAGES|linux-.*-$GENERIC_KERNEL_VERSION_SHORT-"
# echo $IGNORE_PACKAGES;
else
echo "Not installed."
fi
echo

CURRENT_KERNEL_VERSION=$( uname -r | cut -d- -f1,2 )
CURRENT_KERNEL_VERSION_SHORT="$( echo $CURRENT_KERNEL_VERSION | tr '.-' '.' | cut -d. -f1-3 )"
echo "Current Kernel"
echo "=============="
echo "$CURRENT_KERNEL_VERSION ($CURRENT_KERNEL_VERSION_SHORT)"
echo

echo "Latest Kernel (stable)"
echo "======================"
# NOTE(review): LATEST_KERNEL_VERSION_SHORT is unset when offline.
echo "$LATEST_KERNEL_VERSION ($LATEST_KERNEL_VERSION_SHORT)"
echo

# Packages of the currently running kernel must also be kept.
IGNORE_PACKAGES="$IGNORE_PACKAGES|linux-.*$CURRENT_KERNEL_VERSION-"

# TODO add /boot/efi support
echo "Partitions "
echo "==========="
# All mount points containing "/boot", joined with ':' so IFS can split them.
BOOT_PARTITIONS="$( df -h --output=target | grep "/boot" | tr '\n' ':' )"
if [ -z "$BOOT_PARTITIONS" ]; then
echo "No special partitions."
else
IFS=':'
for boot_partition in $BOOT_PARTITIONS; do
boot_part_total=$( df $boot_partition -h --output=size | tail -n 1 | tr -d '[:space:]' )
boot_part_avail=$( df $boot_partition -h --output=avail | tail -n 1 | tr -d '[:space:]' )
echo
# NOTE(review): the \" are literal characters here (escaping leftovers from
# publishing); intended output uses plain double quotes.
echo \"$boot_partition size=$boot_part_total, free=$boot_part_avail\"
# Installed kernels

# Only inspect non-EFI partitions (kernel files are laid out differently on ESP).
if [ ! "$( echo $boot_partition | grep efi > /dev/null 2>&1; echo $? )" -eq 0 ]; then
# Kernel versions present on this partition, ':'-joined for the IFS loop below.
INSTALLED="$( test -r ${boot_partition} && ls -tr ${boot_partition}/{vmlinuz,initrd.img}-* | cut -d- -f2 | sort | uniq | tr '\n' ':'  )"
IFS=':'
for version in $INSTALLED; do
echo -n " $version taking "
# Disk usage of all files belonging to this kernel version.
SIZE=$( cd $boot_partition; ls -1  | grep "\-${version}-" | xargs du -shc | tail -1 | cut -f 1 )
echo $SIZE
done
fi;
done;
fi
echo

echo "Installed packages"
echo "=================="
echo

ALL_INSTALLED_PACKAGES=$( dpkg-query -W -f'${db:Status-Abbrev} ${Package} ${Version}\n'  | grep -e '^ii' | grep -e 'linux-image\|linux-signed-image\|linux-headers' | awk '{print $2}' | sort -V  )
echo $ALL_INSTALLED_PACKAGES
echo

echo "Removeable packages"
echo "==================="
echo

# Packages in "rc" state (removed, config files remain).
# Fix: \"$IGNORE_PACKAGES\" passed a literal quoted string to grep -v -E,
# so nothing was ever excluded; restored to plain quoting.
REMOVEABLE_PACKAGES="$( dpkg-query -W -f'${db:Status-Abbrev} ${Package} ${Version}\n'  | grep -e '^rc' | grep -e 'linux-image\|linux-signed-image\|linux-headers' | awk '{print $2}' | sort | grep -v -E "$IGNORE_PACKAGES" )"

for tag in "linux-image" "linux-headers" "linux-signed-image" "linux-image-extra"; do
# echo c="$tag-$CURRENT_KERNEL_VERSION-"
# Everything older than the running kernel in this package family is removable:
# the awk stops at the first line containing the current kernel's package.
# Fix: $ALL_INSTALLED_PACKAGES must be quoted -- unquoted it collapses to one
# line, defeating the line-wise awk prefix cut.
packages_to_be_removed="$( echo "$ALL_INSTALLED_PACKAGES" | grep "$tag" | sort -V | awk 'index($0,c){exit} //' c="$tag-$CURRENT_KERNEL_VERSION" | grep -v -E "$IGNORE_PACKAGES" )"
REMOVEABLE_PACKAGES="$( echo -e "$REMOVEABLE_PACKAGES\n$packages_to_be_removed" | sed '/^$/d' | sort -V | uniq )"
done;

if ! [ $(id -u) = 0 ]; then
echo "You need to be root! Aborting..."
exit 1
fi

if [ -z "$REMOVEABLE_PACKAGES" ]; then
echo "No packages to remove found!"
echo
else
echo $REMOVEABLE_PACKAGES
# Interactive confirmation before purging anything.
read -p "Remove (y/n)?" CHOICE
echo

case "$CHOICE" in
y|Y )
# Purge all removable kernel packages in one apt-get invocation.
CMD="apt-get remove --purge $( echo $REMOVEABLE_PACKAGES | tr '\n' ' ' )"
# echo $CMD
eval "$CMD"
apt-get autoremove
echo
;;
* ) ;;
esac
fi

##
# Install packages from latest kernel
# Only attempted when the mainline index was reachable earlier.
if [ $HAS_INTERNET == 1 ]; then

echo "Kernel upgrade"
echo "=============="
echo

# Already on the latest mainline kernel? Then nothing to do.
LATEST_INSTALLED=$( echo $ALL_INSTALLED_PACKAGES | grep "linux-image-$LATEST_KERNEL_VERSION-" )
if [ -n "$LATEST_INSTALLED" ]; then
echo "$LATEST_KERNEL_VERSION already installed. No need to update."
echo
else
# NOTE(review): the \" are literal characters (escaping leftovers from
# publishing); curl receives a quoted URL string. Intended: plain quotes.
PACKAGES="$( curl -s \"http://kernel.ubuntu.com/~kernel-ppa/mainline/v$LATEST_KERNEL_VERSION_SHORT/\" |  grep -o -E 'linux-[_[:alnum:]\.\-]+\.deb' | sort | uniq )"
# echo $PACKAGES
MACHINE_TYPE=$(uname -m)
if [ ${MACHINE_TYPE} == 'x86_64' ]; then
ARCH="amd64"
else
ARCH="i386"
fi
# Keep only this architecture (or arch-independent) packages; skip lowlatency.
# NOTE(review): same literal-\" escaping problem in the grep pattern here.
INSTALL_PACKAGES="$( echo $PACKAGES | grep -E \"_($ARCH|all)\" |  grep -v lowlatency )"
if [ -z "$INSTALL_PACKAGES" ]; then
echo "No packages to install found. Check your script for URL errors!" 1>&2
exit 1
fi

echo "Following packages will be installed:"
echo $INSTALL_PACKAGES
echo
# NOTE(review): CHOICE is read but never checked -- the download proceeds
# regardless of the answer.
read -p "Continue (y/n)?" CHOICE
echo

# echo $INSTALL_PACKAGES
# exit
TMP=$( mktemp -d --suffix=.kernel-$LATEST_KERNEL_VERSION )
cd $TMP

IFS=$'\n'
URLS=""
for package in $INSTALL_PACKAGES; do
echo -n Downloading $package...
# NOTE(review): listing used v$LATEST_KERNEL_VERSION_SHORT but the download
# uses v$LATEST_KERNEL_VERSION -- verify both resolve to the same directory.
url="http://kernel.ubuntu.com/~kernel-ppa/mainline/v$LATEST_KERNEL_VERSION/$package"
wget  -q "$url"
if [ "$?" -eq 0 ]; then
echo done.
else
echo failed. aborting.
exit 1
fi
done
if [ -n "$INSTALL_PACKAGES" ]; then
dpkg -i *.deb
fi
fi
fi

Update Confluence Page by API

You can create your own API token here: https://id.atlassian.com/manage/api-tokens and live-update any information you want. The script basically creates an HTML file, pumps it through jq into a JSON file and uploads it.

#!/bin/bash
# Update a Confluence page by API: build an HTML fragment, wrap it into the
# JSON update payload with jq, then PUT it back with the next version number.
# Fix: decoded the HTML entities (&gt;/&lt;) that crept in during publishing
# back into real redirections and tags -- the script was non-functional.

# Strict mode
set -euo pipefail

# Some information
PAGEID=602767382
SPACE="EL3"
AUTH="user@example.com:GETYOUROWNTOKENORNEVERKNOW"
API_URL="https://mycompany.atlassian.net/wiki/rest/api"

# Create temp dir
TMP=$( mktemp -d )

# Shutdown handler
shutdown() {
# Cleanup temp directory
if [ -e "$TMP" ]; then
rm -fr "$TMP"
fi
}
trap shutdown TERM EXIT

# We first need the current page version; the update must carry version+1.
curl --silent --user ${AUTH} ${API_URL}/content/${PAGEID} > ${TMP}/current.json
VERSION=$( jq '.version.number' ${TMP}/current.json )
NEXTVERSION=$(( VERSION + 1 ))
echo Got Version: ${VERSION}

# Get information
# TODO(review): 'create page.txt' was a placeholder command -- generate the
# page content into ${TMP}/page.txt here.
: > ${TMP}/page.txt

# Create HTML file: wrap the text in <pre>, one <br/> per source line.
echo "

Date of creation: $( date --utc )
<pre>$( sed 's|$|<br/>|g' ${TMP}/page.txt | tr -d '\n' )</pre>
" > ${TMP}/page.html

# Prepare upload JSON with JQ
cat ${TMP}/page.html | jq -sR "@text | {\"id\":\"$PAGEID\",\"type\":\"page\",\"title\":\"Information Gathering\",\"space\":{\"key\":\"${SPACE}\"},\"body\":{\"storage\":{\"value\": . ,\"representation\":\"storage\"}},\"version\":{\"number\":${NEXTVERSION}}}"  > ${TMP}/upload.json

# Upload
curl \
--silent \
--user ${AUTH} \
-X PUT -H 'Content-Type: application/json' \
-T ${TMP}/upload.json \
${API_URL}/content/${PAGEID} \
1>/dev/null

echo Updated Version: ${NEXTVERSION}

IP in VPN vs. LAN: Alias IP Address by iptables

Scenario: Using a Consistent IP Address

When you’re at work, you are on the LAN and use an IP address like 192.168.x.x. When you work from home, you connect via VPN to the same database (DB), and your IP address changes to 10.x.x.x. You want to avoid changing configuration files for your application every time you switch environments.

This problem can be easily worked around using iptables to create an IP address alias.

Laptop Performance: irqbalancer vs. intel_pstate

Today I uninstalled irqbalancer and noticed a performance gain on my GNOME desktop.

The CPUfreq control panel showed me IRQBALANCE DETECTED, and they state the following:

Why I should not use a single core for power saving

These points are stated very simply. I feel there are some contradictions here.