Add resource name field to outgoing sg-core metrics #21
Workflow file for this run

name: Integration testing
env:
PROJECT_ROOT: /root/go/src/github.com/infrawatch/sg-core
OPSTOOLS_REPO: https://git.centos.org/rpms/centos-release-opstools/raw/c8s-sig-opstools/f/SOURCES/CentOS-OpsTools.repo
QDR_IMAGE: quay.io/interconnectedcloud/qdrouterd:1.17.0
QDR_VOLUME: "--volume=${{ github.workspace }}/ci/service_configs/qdr:/etc/qpid-dispatch:ro"
QDR_PORT: "-p 5666:5666"
BRIDGE_IMAGE: quay.io/infrawatch/sg-bridge:latest
BRIDGE_VOLUME: "--volume=${{ github.workspace }}/tmp:/tmp/sg-bridge:z"
TEST_IMAGE: registry.access.redhat.com/ubi8
TEST_PORT: "-p 3000:3000"
on: [push, pull_request]
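# Two jobs run in parallel: "metrics" exercises the ceilometer/collectd -> QDR ->
# sg-bridge -> sg-core -> Prometheus chain, and "logging" exercises the
# rsyslog -> QDR -> sg-bridge -> sg-core -> Elasticsearch/Loki chain.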
jobs:
metrics:
name: "[metrics] handler: ceilometer-metrics, collectd-metrics; application: prometheus"
runs-on: ubuntu-20.04
env:
QDR_CHANNEL_CEILOMTR: ceilometer/metering.sample
QDR_CHANNEL_COLLECTD: collectd/metrics
BRIDGE_SOCKET: /tmp/sg-bridge/test-socket
PROMETHEUS_IMAGE: prom/prometheus:latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Prepare environment
run: |
mkdir -p /opt/stack/
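# set default ACLs so files created under /opt/stack (the devstack checkout) get owner-only permissions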
sudo setfacl -Rdm u::7,g::0,o:0 /opt/stack
- name: Prepare environment for mysql-server installation # https://stackoverflow.com/a/66026366
run: |
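# remove the runner's preinstalled mysql packages that conflict with devstack's mysql-server install (see linked workaround)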
sudo apt-get -f install -o Dpkg::Options::="--force-overwrite"
sudo apt-get purge mysql\*
sudo rm -rf /var/lib/mysql
sudo rm -rf /etc/mysql
sudo dpkg -l | grep -i mysql
sudo apt-get clean
- name: Install devstack
run: |
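# stack.sh deploys a minimal OpenStack using the local.conf from this repo,
# including the Ceilometer services reconfigured in a later step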
SOURCE=$(pwd)
git clone https://github.com/openstack/devstack /opt/stack/devstack
pushd /opt/stack/devstack
cp $SOURCE/ci/integration/metrics/local.conf .
sudo apt-get update
./stack.sh
popd
# start message bus services
- name: Start QDR service
run: |
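# qdrouterd listens on 5666 (QDR_PORT) with its router config mounted read-only
# from ci/service_configs/qdr (QDR_VOLUME)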
docker run --name=qdr $QDR_VOLUME $QDR_PORT -d $QDR_IMAGE
- name: Check if sg-bridge repository has same topic branch
id: bridge_branch
continue-on-error: true
run: |
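# git ls-remote exits non-zero when sg-bridge has no branch matching this ref;
# continue-on-error records the outcome so the next two steps can choose between
# the released image and a same-branch build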
echo "${GITHUB_REF#refs/heads/}"
git ls-remote --exit-code --heads https://github.com/infrawatch/sg-bridge.git "${GITHUB_REF#refs/heads/}"
- name: Start sg-bridge for collectd from container image
if: steps.bridge_branch.outcome != 'success'
run: |
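# the bridge subscribes to the collectd channel on the local QDR and relays
# messages onto a unix socket under /tmp/sg-bridge shared with sg-core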
docker run --name=sgbridge --network host $BRIDGE_VOLUME -d \
$BRIDGE_IMAGE --amqp_url amqp:https://localhost:5666/$QDR_CHANNEL_COLLECTD \
--gw_unix=$BRIDGE_SOCKET
- name: Start sg-bridge for collectd with same branch
if: steps.bridge_branch.outcome == 'success'
run: |
docker run --name=sgbridge --network host $BRIDGE_VOLUME -d -uroot \
-e GITHUB_REF -e BRIDGE_SOCKET -e QDR_CHANNEL_COLLECTD -e OPSTOOLS_REPO \
--workdir=$(dirname $BRIDGE_SOCKET) \
$TEST_IMAGE bash $PROJECT_ROOT/ci/integration/metrics/run_bridge.sh
- name: Set Ceilometer pipelines to QDR output and restart notification agent
run: |
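# crudini --merge reads key=value pairs from stdin and merges them into the
# [oslo_messaging_amqp] section so the AMQP 1.0 driver publishes straight to the
# QDR address consumed by sg-bridge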
sudo apt-get install -y crudini
echo addressing_mode="dynamic" | crudini --merge /etc/ceilometer/ceilometer.conf oslo_messaging_amqp
echo pseudo_vhost=true | crudini --merge /etc/ceilometer/ceilometer.conf oslo_messaging_amqp
echo rpc_address_prefix="" | crudini --merge /etc/ceilometer/ceilometer.conf oslo_messaging_amqp
echo notify_address_prefix="" | crudini --merge /etc/ceilometer/ceilometer.conf oslo_messaging_amqp
cp ci/integration/metrics/*pipeline.yaml /etc/ceilometer/.
cat /etc/ceilometer/*
sudo pip install pyngus
sudo systemctl restart devstack@ceilometer-anotification.service
- name: Install collectd
run: |
sudo apt-get install -y collectd
sudo systemctl stop collectd && sudo systemctl disable collectd
sudo cp ci/integration/metrics/collectd.conf /etc/collectd/collectd.conf
sudo touch /var/log/collectd.log && sudo chmod a+rw /var/log/collectd.log
sudo collectd -C ci/integration/metrics/collectd.conf
- name: Run sg-core to process metrics
run: |
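# run_sg.sh (in this repo) is expected to build and start sg-core inside the UBI8
# container, reading metrics from the bridge socket mounted at /tmp/sg-bridge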
docker run --name=sgcore -d -uroot --network host $BRIDGE_VOLUME -e OPSTOOLS_REPO \
--volume ${{ github.workspace }}:$PROJECT_ROOT:z --workdir $PROJECT_ROOT \
$TEST_IMAGE bash $PROJECT_ROOT/ci/integration/metrics/run_sg.sh
- name: Run Prometheus to store metrics
run: |
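# the mounted prometheus.yml provides the scrape config (assumed to target
# sg-core's prometheus application on the host network)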
docker run --name=prometheus -d --network host \
--volume ${{ github.workspace }}/ci/integration/metrics/prometheus.yml:/etc/prometheus/prometheus.yml:ro \
$PROMETHEUS_IMAGE
- name: Debug output
run: |
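# give the whole chain time to produce, relay and scrape metrics before dumping
# diagnostics from every component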
sleep 360
echo "=========================== qdr =========================" && \
docker exec qdr qdstat -b 127.0.0.1:5666 -a
echo "========================= sg-core =======================" && \
docker logs sgcore
echo "======================== collectd =======================" && \
cat /var/log/collectd.log
echo "========================= ceilometer ====================" && \
sudo journalctl -xu devstack@ceilometer-anotification.service
echo "========================= sg-core =======================" && \
docker logs prometheus
- name: Validate metrics processing
run: |
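# run_validation.sh (in this repo) checks the stored metrics end to end;
# a non-zero exit fails the job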
docker run --name=validate -uroot --network host \
--volume ${{ github.workspace }}:$PROJECT_ROOT:z --workdir $PROJECT_ROOT \
$TEST_IMAGE bash $PROJECT_ROOT/ci/integration/metrics/run_validation.sh
#-------------------------------------------------------------------------------
logging:
name: "[logging] handler: logs; application: elasticsearch, loki"
runs-on: ubuntu-20.04
env:
BRIDGE_SOCKET: /tmp/sg-bridge/test-socket
LOKI_IMAGE: quay.io/infrawatch/loki:2.4.2
LOKI_VOLUME: "--volume=${{ github.workspace }}/ci/service_configs/loki:/etc/loki:ro"
LOKI_PORT: "-p 3100:3100"
ELASTIC_IMAGE: docker.elastic.co/elasticsearch/elasticsearch:7.10.2
ELASTIC_PORT: "-p 9200:9200 -p 9300:9300"
RSYSLOG_IMAGE: quay.io/centos/centos:stream8
RSYSLOG_VOLUME: "--volume ${{ github.workspace }}/ci/service_configs/rsyslog/rsyslog_config.conf:/etc/rsyslog.d/integration.conf:z"
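# pipeline under test: rsyslog -> QDR -> sg-bridge -> sg-core -> Elasticsearch and Loki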
steps:
- name: Checkout code
uses: actions/checkout@v2
# start data store services
- name: Start Elasticsearch service
run: |
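# single-node discovery lets Elasticsearch start without forming a cluster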
docker run --name elastic -e "discovery.type=single-node" $ELASTIC_PORT -d $ELASTIC_IMAGE
- name: Start Loki service
run: |
docker run --name=loki $LOKI_VOLUME $LOKI_PORT -d $LOKI_IMAGE
# start message bus services
- name: Start QDR service
run: |
docker run --name=qdr $QDR_VOLUME $QDR_PORT -d $QDR_IMAGE
- name: Check if sg-bridge repository has same topic branch
id: bridge_branch
continue-on-error: true
run: |
echo "${GITHUB_REF#refs/heads/}"
git ls-remote --exit-code --heads https://github.com/infrawatch/sg-bridge.git "${GITHUB_REF#refs/heads/}"
- name: Start sg-bridge from container image
if: steps.bridge_branch.outcome != 'success'
run: |
docker run --name=sgbridge --network host $BRIDGE_VOLUME -d \
$BRIDGE_IMAGE --amqp_url amqp:https://localhost:5666/rsyslog/logs --gw_unix=$BRIDGE_SOCKET
- name: Start sg-bridge with same branch
if: steps.bridge_branch.outcome == 'success'
run: |
docker run --name=sgbridge $BRIDGE_VOLUME -d -uroot --network host -e OPSTOOLS_REPO \
-e GITHUB_REF -e BRIDGE_SOCKET --workdir=$(dirname $BRIDGE_SOCKET) \
$TEST_IMAGE bash $PROJECT_ROOT/ci/integration/logging/run_bridge.sh
- name: Run rsyslog to produce log messages
run: |
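# run_rsyslog.sh (in this repo) is expected to set up rsyslog in the CentOS Stream 8
# container and, via the mounted integration.conf, publish log messages to the
# rsyslog/logs address on QDR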
docker run --name=rsyslog -d -uroot --network host $RSYSLOG_VOLUME \
--volume ${{ github.workspace }}:$PROJECT_ROOT:z --workdir $PROJECT_ROOT \
$RSYSLOG_IMAGE bash $PROJECT_ROOT/ci/integration/logging/run_rsyslog.sh
- name: Wait for services to start successfully
run: |
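# poll QDR's address stats, the Elasticsearch cluster health endpoint and Loki's
# /ready endpoint, giving each up to $timeout seconds to come up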
timeout=180
echo "======================= rsyslog ======================="
rsyslog_wait=0
while [[ $(docker exec qdr qdstat -b 127.0.0.1:5666 -a | grep rsyslog/logs | awk '{print $8}') -le 0 ]]
do
sleep 1
rsyslog_wait=$(($rsyslog_wait+1))
if [[ $rsyslog_wait -gt $timeout ]]; then
echo "ERROR: timeout for rsyslog startup"
break
fi
done
echo "INFO: rsyslog startup took ${rsyslog_wait}s"
echo "===================== elasticsearch ====================="
elastic_wait=0
while ! curl -sX GET "https://127.0.0.1:9200/_cluster/health"
do
sleep 1
elastic_wait=$(($elastic_wait+1))
if [[ $elastic_wait -gt $timeout ]]; then
echo "\nERROR: timeout for elasticsearch startup"
break
fi
done
echo "INFO: elasticsearch startup took ${elastic_wait}s"
echo "========================== loki ========================="
loki_wait=0
while ! curl -sX GET "https://127.0.0.1:3100/ready" | grep -q "^ready$"
do
sleep 1
loki_wait=$(($loki_wait+1))
if [[ $loki_wait -gt $timeout ]]; then
echo "ERROR: timeout for loki startup"
break
fi
done
echo "INFO: loki startup took ${loki_wait}s"
- name: Print container logs
run: |
echo "========================= rsyslog =======================" && \
docker logs rsyslog
echo "===================== elasticsearch =====================" && \
docker logs elastic
echo "========================== loki =========================" && \
docker logs loki
echo "========================== qdr ==========================" && \
docker logs qdr
echo "======================= sg-bridge =======================" && \
docker logs sgbridge
# run integration tests
- name: Run sg-core to process log messages
run: |
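# run_sg.sh (in this repo) is expected to start sg-core against the bridge socket;
# the next step polls sg-core's own metrics endpoint on port 3000 (TEST_PORT)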
docker run --name=sgcore -d -uroot --network host $TEST_PORT $BRIDGE_VOLUME -e OPSTOOLS_REPO \
--volume ${{ github.workspace }}:$PROJECT_ROOT:z --workdir $PROJECT_ROOT \
$TEST_IMAGE bash $PROJECT_ROOT/ci/integration/logging/run_sg.sh
- name: sg-core debug output
run: |
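# wait until sg-core reports at least one received log message on its internal
# metrics endpoint before collecting diagnostics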
timeout=360
sg_wait=0
while [[ $(curl -sX GET "https://127.0.0.1:3000/metrics" | grep 'sg_total_logs_received{source="SG"}' | awk '{print $2}') -le 0 ]]
do
sleep 1
sg_wait=$(($sg_wait+1))
if [[ $sg_wait -gt $timeout ]]; then
echo "ERROR: timeout for sg-core startup"
break
fi
done
echo "INFO: sg-core startup took ${sg_wait}s"
docker logs sgcore
docker exec qdr qdstat -b 127.0.0.1:5666 -c
docker exec qdr qdstat -b 127.0.0.1:5666 -a
- name: Validate log message processing
run: |
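# run_validation.sh (in this repo) checks that the log messages reached
# Elasticsearch and Loki; a non-zero exit fails the job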
docker run --name=validate -uroot --network host \
--volume ${{ github.workspace }}:$PROJECT_ROOT:z --workdir $PROJECT_ROOT \
$TEST_IMAGE bash $PROJECT_ROOT/ci/integration/logging/run_validation.sh