author    Paweł Dybiec <pdybiec@stud.cs.uni.wroc.pl>  2019-12-05 19:28:19 +0100
committer Paweł Dybiec <pdybiec@stud.cs.uni.wroc.pl>  2019-12-05 19:28:19 +0100
commit    e485c44e2231789269f22663d7a928cd61ec3a79 (patch)
tree      eee3595c5950d57a25e6b12ea4181ad46684e030
parent    Cleanup of nginx config and blocked some scanning servers (diff)
Remove monitoring stack configs (Elasticsearch/Kibana and Prometheus)
-rw-r--r--  compose/es/Dockerfile                          5
-rw-r--r--  compose/es/docker-compose.yml                 13
-rw-r--r--  compose/es/kibana.yml                          5
-rw-r--r--  compose/monitoring/prometheus/prometheus.yml  40
4 files changed, 0 insertions, 63 deletions
diff --git a/compose/es/Dockerfile b/compose/es/Dockerfile
deleted file mode 100644
index 8e67d74..0000000
--- a/compose/es/Dockerfile
+++ /dev/null
@@ -1,5 +0,0 @@
-FROM python:3.4-alpine
-ADD . /code
-WORKDIR /code
-RUN pip install -r requirements.txt
-CMD ["python", "app.py"]
diff --git a/compose/es/docker-compose.yml b/compose/es/docker-compose.yml
deleted file mode 100644
index 3edf4c0..0000000
--- a/compose/es/docker-compose.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-version: '3'
-services:
-  elasticsearch:
-    image: "docker.elastic.co/elasticsearch/elasticsearch:6.3.1"
-    environment:
-    - "discovery.type=single-node"
-    #ports:
-    # - "9200:9200"
-    # - "9300:9300"
-  kibana:
-    image: "docker.elastic.co/kibana/kibana:6.3.1"
-    ports:
-    - "5601:5601"
diff --git a/compose/es/kibana.yml b/compose/es/kibana.yml
deleted file mode 100644
index 59691ca..0000000
--- a/compose/es/kibana.yml
+++ /dev/null
@@ -1,5 +0,0 @@
-server.name: kibana
-server.host: "0"
-elasticsearch.url: http://elasticsearch:9200
-xpack.monitoring.ui.container.elasticsearch.enabled: true
-xpack.monitoring.enabled: true
diff --git a/compose/monitoring/prometheus/prometheus.yml b/compose/monitoring/prometheus/prometheus.yml
deleted file mode 100644
index 2903091..0000000
--- a/compose/monitoring/prometheus/prometheus.yml
+++ /dev/null
@@ -1,40 +0,0 @@
-# my global config
-global:
-  scrape_interval:     15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
-  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
-  # scrape_timeout is set to the global default (10s).
-
-  # Attach these labels to any time series or alerts when communicating with
-  # external systems (federation, remote storage, Alertmanager).
-  external_labels:
-      monitor: 'codelab-monitor'
-
-# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
-rule_files:
-  # - "first.rules"
-  # - "second.rules"
-
-# A scrape configuration containing exactly one endpoint to scrape:
-# Here it's Prometheus itself.
-scrape_configs:
-  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
-  - job_name: 'prometheus'
-
-    # metrics_path defaults to '/metrics'
-    # scheme defaults to 'http'.
-
-    static_configs:
-      - targets: ['localhost:9090']
-
-  - job_name: 'docker'
-         # metrics_path defaults to '/metrics'
-         # scheme defaults to 'http'.
-
-    static_configs:
-      - targets: ['dockerhost:9323']
-  - job_name: 'cadvisor'
-         # metrics_path defaults to '/metrics'
-         # scheme defaults to 'http'.
-
-    static_configs:
-      - targets: ['cadvisor:8080']