Wednesday, June 19, 2019

HAProxy ACL

root@qqmelo1c:~/postgres# cat /etc/haproxy/haproxy.cfg
global
        log /dev/log    local0
        log /dev/log    local1 notice
        chroot /var/lib/haproxy
        stats socket /run/haproxy/admin.sock mode 660 level admin expose-fd listeners
        stats timeout 30s
        user haproxy
        group haproxy
        daemon

        # Default SSL material locations
        ca-base /etc/ssl/certs
        crt-base /etc/ssl/private

        # Default ciphers to use on SSL-enabled listening sockets.
        # For more information, see ciphers(1SSL). This list is from:
        #  https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
        # An alternative list with additional directives can be obtained from
        #  https://mozilla.github.io/server-side-tls/ssl-config-generator/?server=haproxy
        ssl-default-bind-ciphers ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:RSA+AESGCM:RSA+AES:!aNULL:!MD5:!DSS
        ssl-default-bind-options no-sslv3

defaults
        log     global
        mode    http
        option  httplog
        option  dontlognull
        timeout connect 5000
        timeout client  50000
        timeout server  50000
        errorfile 400 /etc/haproxy/errors/400.http
        errorfile 403 /etc/haproxy/errors/403.http
        errorfile 408 /etc/haproxy/errors/408.http
        errorfile 500 /etc/haproxy/errors/500.http
        errorfile 502 /etc/haproxy/errors/502.http
        errorfile 503 /etc/haproxy/errors/503.http
        errorfile 504 /etc/haproxy/errors/504.http


listen 3.16.21.136
    bind *:80
    mode http
    stats enable
    stats auth  cda:cda
    balance roundrobin
    option forwardfor
    #acl is_for_backend2 path_reg ^$|^/$|^/nodos|^/bpages
    acl is_for_backend2 path_beg -i /nodos
    use_backend backend2 if is_for_backend2

    default_backend backend1

    backend backend1
    server worker2 172.31.100.16:31759  check  # Private IP frontal-01
    server worker1 172.31.104.150:31759  check # Private IP frontal-01

    backend backend2
    server worker2 172.31.100.16:30737  check  # Private IP frontal-01
    server worker1 172.31.104.150:30737  check # Private IP frontal-01
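
A quick way to check that the ACL does what we want is to hit the frontend with and without the /nodos prefix (same public IP the listen section is named after):

# should match the path_beg rule and land on backend2 (NodePort 30737)
curl -I http://3.16.21.136/nodos/

# anything else falls through to default_backend backend1 (NodePort 31759)
curl -I http://3.16.21.136/

The stats page enabled above (credentials from stats auth cda:cda; /haproxy?stats is the default URI when only stats enable is set) is handy for watching the per-backend counters while testing.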

Thursday, June 13, 2019

HAPROXY - K8s

On the master node, install HAProxy.
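
A minimal way to get it in place, assuming an Ubuntu master like the ones used in the cluster notes further down:

sudo apt-get update
sudo apt-get install -y haproxy
# after dropping in the config below:
sudo systemctl restart haproxy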

Here is the configuration file:


global
        log /dev/log    local0
        log /dev/log    local1 notice
        chroot /var/lib/haproxy
        stats socket /run/haproxy/admin.sock mode 660 level admin expose-fd listeners
        stats timeout 30s
        user haproxy
        group haproxy
        daemon

        # Default SSL material locations
        ca-base /etc/ssl/certs
        crt-base /etc/ssl/private

        # Default ciphers to use on SSL-enabled listening sockets.
        # For more information, see ciphers(1SSL). This list is from:
        #  https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
        # An alternative list with additional directives can be obtained from
        #  https://mozilla.github.io/server-side-tls/ssl-config-generator/?server=haproxy
        ssl-default-bind-ciphers ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:RSA+AESGCM:RSA+AES:!aNULL:!MD5:!DSS
        ssl-default-bind-options no-sslv3

defaults
        log     global
        mode    http
        option  httplog
        option  dontlognull
        timeout connect 5000
        timeout client  50000
        timeout server  50000
        errorfile 400 /etc/haproxy/errors/400.http
        errorfile 403 /etc/haproxy/errors/403.http
        errorfile 408 /etc/haproxy/errors/408.http
        errorfile 500 /etc/haproxy/errors/500.http
        errorfile 502 /etc/haproxy/errors/502.http
        errorfile 503 /etc/haproxy/errors/503.http
        errorfile 504 /etc/haproxy/errors/504.http


listen 192.168.0.140
    bind *:80
    mode http
    stats enable
    stats auth  cda:cda
    balance roundrobin
    option forwardfor
    server worker1 192.168.0.141:30224  check  # Private IP frontal-01
    server worker2 192.168.0.142:30224  check # Private IP frontal-01
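
With that, anything hitting the master on port 80 gets round-robined to NodePort 30224 on the two workers. A quick check from any machine that can reach the master (cda:cda are the credentials from the config; /haproxy?stats is HAProxy's default stats URI when only stats enable is set):

curl -I http://192.168.0.140/
curl -u cda:cda "http://192.168.0.140/haproxy?stats"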









Sunday, June 2, 2019

Building a Kubernetes cluster

Let's begin our journey of learning Kubernetes by setting up a practice cluster. This will allow you to get hands-on with Kubernetes as quickly as possible, so that as you learn about various Kubernetes concepts you will be able to work with them in a real cluster if you choose. In this lesson, I will guide you through the process of setting up a simple Kubernetes cluster using Linux Academy's cloud playground. After completing this lesson, you will have a simple cluster that you can work with, and you will be familiar with the process of standing up a cluster on Ubuntu servers.
Here are the commands used in this lesson. Feel free to use them as a reference, or just use them to follow along!
Add the Docker Repository on all three servers.
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
sudo add-apt-repository    "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
   $(lsb_release -cs) \
   stable"
Add the Kubernetes repository on all three servers.
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
cat << EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
deb https://apt.kubernetes.io/ kubernetes-xenial main
EOF
Install Docker, Kubeadm, Kubelet, and Kubectl on all three servers.
NOTE: There are some issues being reported when installing version 1.12.2-00 from the Kubernetes Ubuntu repositories. You can work around this by using version 1.12.7-00 for kubelet, kubeadm, and kubectl.
sudo apt-get update
sudo apt-get install -y docker-ce=18.06.1~ce~3-0~ubuntu kubelet=1.12.7-00 kubeadm=1.12.7-00 kubectl=1.12.7-00
sudo apt-mark hold docker-ce kubelet kubeadm kubectl
Enable net.bridge.bridge-nf-call-iptables on all three nodes.
echo "net.bridge.bridge-nf-call-iptables=1" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p
On only the Kube Master server, initialize the cluster and configure kubectl.
sudo kubeadm init --pod-network-cidr=10.244.0.0/16
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Install the flannel networking plugin in the cluster by running this command on the Kube Master server.
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/bc79dd1505b0c8681ece4de4c0d86c5cd2643275/Documentation/kube-flannel.yml
The kubeadm init command that you ran on the master should output a kubeadm join command containing a token and hash. You will need to copy that command from the master and run it on both worker nodes with sudo.
sudo kubeadm join $controller_private_ip:6443 --token $token --discovery-token-ca-cert-hash $hash
Now you are ready to verify that the cluster is up and running. On the Kube Master server, check the list of nodes.
kubectl get nodes
It should look something like this:
NAME                      STATUS   ROLES    AGE   VERSION
wboyd1c.mylabserver.com   Ready    master   54m   v1.12.2
wboyd2c.mylabserver.com   Ready    <none>   49m   v1.12.2
wboyd3c.mylabserver.com   Ready    <none>   49m   v1.12.2
Make sure that all three of your nodes are listed and that all have a STATUS of Ready.
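If any node stays NotReady, a quick check that is not part of the original lesson but usually points at the problem is to confirm the flannel and kube-proxy pods are Running on every node:

kubectl get pods -n kube-system -o wide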

Wednesday, April 3, 2019

GLPI in 2 minutes

Well, if you wanted something simple, here it is.

First we create the glpi project:

oc new-project glpi

and then:

oc new-app docker.io/diouxx/glpi

(I went with this image because it seems to be the official one.)

Then, the usual routine:

[root@qqmelo1c ~]# oc get pods
NAME            READY     STATUS              RESTARTS   AGE
glpi-1-deploy   1/1       Running             0          13s
glpi-1-kcbkm    0/1       ContainerCreating   0          10s
[root@qqmelo1c ~]# oc describe pod glpi-1-kcbkm
Name:               glpi-1-kcbkm
Namespace:          glpi
Priority:           0
PriorityClassName:  <none>
Node:               qqmelo1c.mylabserver.com/172.31.38.153
Start Time:         Thu, 04 Apr 2019 02:47:47 +0000
Labels:             app=glpi
                    deployment=glpi-1
                    deploymentconfig=glpi
Annotations:        openshift.io/deployment-config.latest-version=1
                    openshift.io/deployment-config.name=glpi
                    openshift.io/deployment.name=glpi-1
                    openshift.io/generated-by=OpenShiftNewApp
                    openshift.io/scc=anyuid
Status:             Running
IP:                 10.128.0.136
.....

And then, to check that everything went smoothly:

lynx 10.128.0.136
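
To reach it from outside the cluster rather than from the node itself, the usual move (assuming oc new-app created a service named glpi, which it normally does) is to expose a route and browse to the hostname it reports:

oc expose svc/glpi
oc get route glpi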

And if we're still bored, a bit of extra magic with:

[root@qqmelo1c ~]# oc get pods glpi-1-kcbkm -o yaml > glpi.yml
[root@qqmelo1c ~]# cat glpi.yml
apiVersion: v1
kind: Pod
metadata:
  annotations:
    openshift.io/deployment-config.latest-version: "1"
    openshift.io/deployment-config.name: glpi
    openshift.io/deployment.name: glpi-1
    openshift.io/generated-by: OpenShiftNewApp
    openshift.io/scc: anyuid
  creationTimestamp: 2019-04-04T02:47:47Z
  generateName: glpi-1-
  labels:
    app: glpi
    deployment: glpi-1
    deploymentconfig: glpi
......