فهرست منبع

docker: added graphite v1.x docker block

Torkel Ödegaard 8 سال پیش
والد
کامیت
0e2b809f7e
38 فایل‌های تغییر یافته به همراه 1702 افزوده شده و 0 حذف شده
  1. 2 0
      docker/blocks/graphite/Dockerfile
  2. 93 0
      docker/blocks/graphite1/Dockerfile
  3. 11 0
      docker/blocks/graphite1/conf/etc/logrotate.d/graphite-statsd
  4. 36 0
      docker/blocks/graphite1/conf/etc/my_init.d/01_conf_init.sh
  5. 96 0
      docker/blocks/graphite1/conf/etc/nginx/nginx.conf
  6. 31 0
      docker/blocks/graphite1/conf/etc/nginx/sites-enabled/graphite-statsd.conf
  7. 4 0
      docker/blocks/graphite1/conf/etc/service/carbon-aggregator/run
  8. 4 0
      docker/blocks/graphite1/conf/etc/service/carbon/run
  9. 3 0
      docker/blocks/graphite1/conf/etc/service/graphite/run
  10. 4 0
      docker/blocks/graphite1/conf/etc/service/nginx/run
  11. 4 0
      docker/blocks/graphite1/conf/etc/service/statsd/run
  12. 35 0
      docker/blocks/graphite1/conf/opt/graphite/conf/aggregation-rules.conf
  13. 5 0
      docker/blocks/graphite1/conf/opt/graphite/conf/blacklist.conf
  14. 75 0
      docker/blocks/graphite1/conf/opt/graphite/conf/carbon.amqp.conf
  15. 359 0
      docker/blocks/graphite1/conf/opt/graphite/conf/carbon.conf
  16. 57 0
      docker/blocks/graphite1/conf/opt/graphite/conf/dashboard.conf
  17. 38 0
      docker/blocks/graphite1/conf/opt/graphite/conf/graphTemplates.conf
  18. 21 0
      docker/blocks/graphite1/conf/opt/graphite/conf/relay-rules.conf
  19. 18 0
      docker/blocks/graphite1/conf/opt/graphite/conf/rewrite-rules.conf
  20. 43 0
      docker/blocks/graphite1/conf/opt/graphite/conf/storage-aggregation.conf
  21. 17 0
      docker/blocks/graphite1/conf/opt/graphite/conf/storage-schemas.conf
  22. 6 0
      docker/blocks/graphite1/conf/opt/graphite/conf/whitelist.conf
  23. 94 0
      docker/blocks/graphite1/conf/opt/graphite/webapp/graphite/app_settings.py
  24. 215 0
      docker/blocks/graphite1/conf/opt/graphite/webapp/graphite/local_settings.py
  25. 6 0
      docker/blocks/graphite1/conf/opt/statsd/config.js
  26. 26 0
      docker/blocks/graphite1/conf/usr/local/bin/django_admin_init.exp
  27. 3 0
      docker/blocks/graphite1/conf/usr/local/bin/manage.sh
  28. 16 0
      docker/blocks/graphite1/fig
  29. 76 0
      docker/blocks/graphite1/files/carbon.conf
  30. 102 0
      docker/blocks/graphite1/files/events_views.py
  31. 20 0
      docker/blocks/graphite1/files/initial_data.json
  32. 42 0
      docker/blocks/graphite1/files/local_settings.py
  33. 1 0
      docker/blocks/graphite1/files/my_htpasswd
  34. 70 0
      docker/blocks/graphite1/files/nginx.conf
  35. 8 0
      docker/blocks/graphite1/files/statsd_config.js
  36. 19 0
      docker/blocks/graphite1/files/storage-aggregation.conf
  37. 16 0
      docker/blocks/graphite1/files/storage-schemas.conf
  38. 26 0
      docker/blocks/graphite1/files/supervisord.conf

+ 2 - 0
docker/blocks/graphite/Dockerfile

@@ -32,6 +32,7 @@ add ./files/my_htpasswd /etc/nginx/.htpasswd
 # Add system service config
 add ./files/nginx.conf /etc/nginx/nginx.conf
 add ./files/supervisord.conf /etc/supervisor/conf.d/supervisord.conf
+
 # Nginx
 #
 # graphite
@@ -39,6 +40,7 @@ expose  80
 
 # Carbon line receiver port
 expose  2003
+
 # Carbon cache query port
 expose  7002
 

+ 93 - 0
docker/blocks/graphite1/Dockerfile

@@ -0,0 +1,93 @@
+FROM phusion/baseimage:0.9.22
+MAINTAINER Denys Zhdanov <denis.zhdanov@gmail.com>
+
+RUN apt-get -y update \
+  && apt-get -y upgrade \
+  && apt-get -y --force-yes install vim \
+  nginx \
+  python-dev \
+  python-flup \
+  python-pip \
+  python-ldap \
+  expect \
+  git \
+  memcached \
+  sqlite3 \
+  libffi-dev \
+  libcairo2 \
+  libcairo2-dev \
+  python-cairo \
+  python-rrdtool \
+  pkg-config \
+  nodejs \
+  && rm -rf /var/lib/apt/lists/*
+
+# fix python dependencies (LTS Django and newer memcached/txAMQP)
+RUN pip install django==1.8.18 \
+  python-memcached==1.53 \
+  txAMQP==0.6.2 \
+  && pip install --upgrade pip
+
+# install whisper
+RUN git clone -b 1.0.2 --depth 1 https://github.com/graphite-project/whisper.git /usr/local/src/whisper
+WORKDIR /usr/local/src/whisper
+RUN python ./setup.py install
+
+# install carbon
+RUN git clone -b 1.0.2 --depth 1 https://github.com/graphite-project/carbon.git /usr/local/src/carbon
+WORKDIR /usr/local/src/carbon
+RUN pip install -r requirements.txt \
+  && python ./setup.py install
+
+# install graphite
+RUN git clone -b 1.0.2 --depth 1 https://github.com/graphite-project/graphite-web.git /usr/local/src/graphite-web
+WORKDIR /usr/local/src/graphite-web
+RUN pip install -r requirements.txt \
+  && python ./setup.py install
+ADD conf/opt/graphite/conf/*.conf /opt/graphite/conf/
+ADD conf/opt/graphite/webapp/graphite/local_settings.py /opt/graphite/webapp/graphite/local_settings.py
+ADD conf/opt/graphite/webapp/graphite/app_settings.py /opt/graphite/webapp/graphite/app_settings.py
+WORKDIR /opt/graphite/webapp
+RUN mkdir -p /var/log/graphite/ \
+  && PYTHONPATH=/opt/graphite/webapp django-admin.py collectstatic --noinput --settings=graphite.settings
+
+# install statsd
+RUN git clone -b v0.7.2 https://github.com/etsy/statsd.git /opt/statsd
+ADD conf/opt/statsd/config.js /opt/statsd/config.js
+
+# config nginx
+RUN rm /etc/nginx/sites-enabled/default
+ADD conf/etc/nginx/nginx.conf /etc/nginx/nginx.conf
+ADD conf/etc/nginx/sites-enabled/graphite-statsd.conf /etc/nginx/sites-enabled/graphite-statsd.conf
+
+# init django admin
+ADD conf/usr/local/bin/django_admin_init.exp /usr/local/bin/django_admin_init.exp
+ADD conf/usr/local/bin/manage.sh /usr/local/bin/manage.sh
+RUN chmod +x /usr/local/bin/manage.sh \
+  && /usr/local/bin/django_admin_init.exp
+
+# logging support
+RUN mkdir -p /var/log/carbon /var/log/graphite /var/log/nginx
+ADD conf/etc/logrotate.d/graphite-statsd /etc/logrotate.d/graphite-statsd
+
+# daemons
+ADD conf/etc/service/carbon/run /etc/service/carbon/run
+ADD conf/etc/service/carbon-aggregator/run /etc/service/carbon-aggregator/run
+ADD conf/etc/service/graphite/run /etc/service/graphite/run
+ADD conf/etc/service/statsd/run /etc/service/statsd/run
+ADD conf/etc/service/nginx/run /etc/service/nginx/run
+
+# default conf setup
+ADD conf /etc/graphite-statsd/conf
+ADD conf/etc/my_init.d/01_conf_init.sh /etc/my_init.d/01_conf_init.sh
+
+# cleanup
+RUN apt-get clean\
+ && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
+
+# defaults
+EXPOSE 80 2003-2004 2023-2024 8125/udp 8126
+VOLUME ["/opt/graphite/conf", "/opt/graphite/storage", "/etc/nginx", "/opt/statsd", "/etc/logrotate.d", "/var/log"]
+WORKDIR /
+ENV HOME /root
+CMD ["/sbin/my_init"]

+ 11 - 0
docker/blocks/graphite1/conf/etc/logrotate.d/graphite-statsd

@@ -0,0 +1,11 @@
+/var/log/*.log /var/log/*/*.log {
+  weekly
+  size 50M
+  missingok
+  rotate 10
+  compress
+  delaycompress
+  notifempty
+  copytruncate
+  su root syslog
+}

+ 36 - 0
docker/blocks/graphite1/conf/etc/my_init.d/01_conf_init.sh

@@ -0,0 +1,36 @@
+#!/bin/bash
+
+conf_dir=/etc/graphite-statsd/conf
+
+# auto setup graphite with default configs if /opt/graphite is missing
+# needed for the use case when a docker host volume is mounted at any of the following:
+#  - /opt/graphite
+#  - /opt/graphite/conf
+#  - /opt/graphite/webapp/graphite
+graphite_dir_contents=$(find /opt/graphite -mindepth 1 -print -quit)
+graphite_conf_dir_contents=$(find /opt/graphite/conf -mindepth 1 -print -quit)
+graphite_webapp_dir_contents=$(find /opt/graphite/webapp/graphite -mindepth 1 -print -quit)
+graphite_storage_dir_contents=$(find /opt/graphite/storage -mindepth 1 -print -quit)
+if [[ -z $graphite_dir_contents ]]; then
+  git clone -b 1.0.2 --depth 1 https://github.com/graphite-project/graphite-web.git /usr/local/src/graphite-web
+  cd /usr/local/src/graphite-web && python ./setup.py install
+fi
+if [[ -z $graphite_storage_dir_contents ]]; then
+  /usr/local/bin/django_admin_init.exp
+fi
+if [[ -z $graphite_conf_dir_contents ]]; then
+  cp -R $conf_dir/opt/graphite/conf/*.conf /opt/graphite/conf/
+fi
+if [[ -z $graphite_webapp_dir_contents ]]; then
+  cp $conf_dir/opt/graphite/webapp/graphite/local_settings.py /opt/graphite/webapp/graphite/local_settings.py
+fi
+
+# auto setup statsd with default config if /opt/statsd is missing
+# needed for the use case when a docker host volume is mounted at any of the following:
+#  - /opt/statsd
+statsd_dir_contents=$(find /opt/statsd -mindepth 1 -print -quit)
+if [[ -z $statsd_dir_contents ]]; then
+  git clone -b v0.7.2 https://github.com/etsy/statsd.git /opt/statsd
+  cp $conf_dir/opt/statsd/config.js /opt/statsd/config.js
+fi
+

+ 96 - 0
docker/blocks/graphite1/conf/etc/nginx/nginx.conf

@@ -0,0 +1,96 @@
+user www-data;
+worker_processes 4;
+pid /run/nginx.pid;
+daemon off;
+
+events {
+	worker_connections 768;
+	# multi_accept on;
+}
+
+http {
+
+	##
+	# Basic Settings
+	##
+
+	sendfile on;
+	tcp_nopush on;
+	tcp_nodelay on;
+	keepalive_timeout 65;
+	types_hash_max_size 2048;
+	# server_tokens off;
+
+	# server_names_hash_bucket_size 64;
+	# server_name_in_redirect off;
+
+	include /etc/nginx/mime.types;
+	default_type application/octet-stream;
+
+	##
+	# Logging Settings
+	##
+
+	access_log /var/log/nginx/access.log;
+	error_log /var/log/nginx/error.log;
+
+	##
+	# Gzip Settings
+	##
+
+	gzip on;
+	gzip_disable "msie6";
+
+	# gzip_vary on;
+	# gzip_proxied any;
+	# gzip_comp_level 6;
+	# gzip_buffers 16 8k;
+	# gzip_http_version 1.1;
+	# gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
+
+	##
+	# nginx-naxsi config
+	##
+	# Uncomment it if you installed nginx-naxsi
+	##
+
+	#include /etc/nginx/naxsi_core.rules;
+
+	##
+	# nginx-passenger config
+	##
+	# Uncomment it if you installed nginx-passenger
+	##
+
+	#passenger_root /usr;
+	#passenger_ruby /usr/bin/ruby;
+
+	##
+	# Virtual Host Configs
+	##
+
+	include /etc/nginx/conf.d/*.conf;
+	include /etc/nginx/sites-enabled/*;
+}
+
+
+#mail {
+#	# See sample authentication script at:
+#	# http://wiki.nginx.org/ImapAuthenticateWithApachePhpScript
+#
+#	# auth_http localhost/auth.php;
+#	# pop3_capabilities "TOP" "USER";
+#	# imap_capabilities "IMAP4rev1" "UIDPLUS";
+#
+#	server {
+#		listen     localhost:110;
+#		protocol   pop3;
+#		proxy      on;
+#	}
+#
+#	server {
+#		listen     localhost:143;
+#		protocol   imap;
+#		proxy      on;
+#	}
+#}

+ 31 - 0
docker/blocks/graphite1/conf/etc/nginx/sites-enabled/graphite-statsd.conf

@@ -0,0 +1,31 @@
+server {
+  listen 80;
+  root /opt/graphite/static;
+  index index.html;
+
+  location /media {
+    # django admin static files
+    alias /usr/local/lib/python2.7/dist-packages/django/contrib/admin/media/;
+  }
+
+  location /admin/auth/admin {
+    alias /usr/local/lib/python2.7/dist-packages/django/contrib/admin/static/admin;
+  }
+
+  location /admin/auth/user/admin {
+    alias /usr/local/lib/python2.7/dist-packages/django/contrib/admin/static/admin;
+  }
+
+  location / {
+    proxy_pass http://localhost:8080;
+    proxy_set_header  Host      $host;
+    proxy_set_header  X-Real-IP $remote_addr;
+    proxy_set_header  X-Forwarded-For $proxy_add_x_forwarded_for;
+
+    add_header 'Access-Control-Allow-Origin' '*';
+    add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS';
+    add_header 'Access-Control-Allow-Headers' 'Authorization, Content-Type';
+    add_header 'Access-Control-Allow-Credentials' 'true';
+  }
+
+}

+ 4 - 0
docker/blocks/graphite1/conf/etc/service/carbon-aggregator/run

@@ -0,0 +1,4 @@
+#!/bin/bash
+
+rm -f /opt/graphite/storage/carbon-aggregator-a.pid
+exec /usr/bin/python /opt/graphite/bin/carbon-aggregator.py start --debug 2>&1 >> /var/log/carbon-aggregator.log

+ 4 - 0
docker/blocks/graphite1/conf/etc/service/carbon/run

@@ -0,0 +1,4 @@
+#!/bin/bash
+
+rm -f /opt/graphite/storage/carbon-cache-a.pid
+exec /usr/bin/python /opt/graphite/bin/carbon-cache.py start --debug 2>&1 >> /var/log/carbon.log

+ 3 - 0
docker/blocks/graphite1/conf/etc/service/graphite/run

@@ -0,0 +1,3 @@
+#!/bin/bash
+
+export PYTHONPATH=/opt/graphite/webapp && exec /usr/local/bin/gunicorn wsgi --workers=4 --bind=127.0.0.1:8080 --log-file=/var/log/gunicorn.log --preload --pythonpath=/opt/graphite/webapp/graphite

+ 4 - 0
docker/blocks/graphite1/conf/etc/service/nginx/run

@@ -0,0 +1,4 @@
+#!/bin/bash
+
+mkdir -p /var/log/nginx
+exec /usr/sbin/nginx -c /etc/nginx/nginx.conf

+ 4 - 0
docker/blocks/graphite1/conf/etc/service/statsd/run

@@ -0,0 +1,4 @@
+#!/bin/bash
+
+exec /usr/bin/nodejs /opt/statsd/stats.js /opt/statsd/config.js >> /var/log/statsd.log 2>&1
+

+ 35 - 0
docker/blocks/graphite1/conf/opt/graphite/conf/aggregation-rules.conf

@@ -0,0 +1,35 @@
+# The form of each line in this file should be as follows:
+#
+#   output_template (frequency) = method input_pattern
+#
+# This will capture any received metrics that match 'input_pattern'
+# for calculating an aggregate metric. The calculation will occur
+# every 'frequency' seconds and the 'method' can specify 'sum' or
+# 'avg'. The name of the aggregate metric will be derived from
+# 'output_template' filling in any captured fields from 'input_pattern'.
+#
+# For example, if your metric naming scheme is:
+#
+#   <env>.applications.<app>.<server>.<metric>
+#
+# You could configure some aggregations like so:
+#
+#   <env>.applications.<app>.all.requests (60) = sum <env>.applications.<app>.*.requests
+#   <env>.applications.<app>.all.latency (60) = avg <env>.applications.<app>.*.latency
+#
+# As an example, if the following metrics are received:
+#
+#   prod.applications.apache.www01.requests
+#   prod.applications.apache.www01.requests
+#
+# They would all go into the same aggregation buffer and after 60 seconds the
+# aggregate metric 'prod.applications.apache.all.requests' would be calculated
+# by summing their values.
+#
+# Template components such as <env> will match everything up to the next dot.
+# To match multiple metric components including the dots, use <<metric>> in the
+# input template:
+#
+#   <env>.applications.<app>.all.<app_metric> (60) = sum <env>.applications.<app>.*.<<app_metric>>
+#
+# Note that any time this file is modified, it will be re-read automatically.

+ 5 - 0
docker/blocks/graphite1/conf/opt/graphite/conf/blacklist.conf

@@ -0,0 +1,5 @@
+# This file takes a single regular expression per line
+# If USE_WHITELIST is set to True in carbon.conf, any metrics received which
+# match one of these expressions will be dropped
+# This file is reloaded automatically when changes are made
+^some\.noisy\.metric\.prefix\..*

+ 75 - 0
docker/blocks/graphite1/conf/opt/graphite/conf/carbon.amqp.conf

@@ -0,0 +1,75 @@
+# This is a configuration file with AMQP enabled
+
+[cache]
+LOCAL_DATA_DIR =
+
+# Specify the user to drop privileges to
+# If this is blank carbon runs as the user that invokes it
+# This user must have write access to the local data directory
+USER =
+
+# Limit the size of the cache to avoid swapping or becoming CPU bound.
+# Sorting and serving cache queries get more expensive as the cache grows.
+# Use the value "inf" (infinity) for an unlimited cache size.
+MAX_CACHE_SIZE = inf
+
+# Limits the number of whisper update_many() calls per second, which effectively
+# means the number of write requests sent to the disk. This is intended to
+# prevent over-utilizing the disk and thus starving the rest of the system.
+# When the rate of required updates exceeds this, then carbon's caching will
+# take effect and increase the overall throughput accordingly.
+MAX_UPDATES_PER_SECOND = 1000
+
+# Softly limits the number of whisper files that get created each minute.
+# Setting this value low (like at 50) is a good way to ensure your graphite
+# system will not be adversely impacted when a bunch of new metrics are
+# sent to it. The trade off is that it will take much longer for those metrics'
+# database files to all get created and thus longer until the data becomes usable.
+# Setting this value high (like "inf" for infinity) will cause graphite to create
+# the files quickly but at the risk of slowing I/O down considerably for a while.
+MAX_CREATES_PER_MINUTE = inf
+
+LINE_RECEIVER_INTERFACE = 0.0.0.0
+LINE_RECEIVER_PORT = 2003
+
+UDP_RECEIVER_INTERFACE = 0.0.0.0
+UDP_RECEIVER_PORT = 2003
+
+PICKLE_RECEIVER_INTERFACE = 0.0.0.0
+PICKLE_RECEIVER_PORT = 2004
+
+CACHE_QUERY_INTERFACE = 0.0.0.0
+CACHE_QUERY_PORT = 7002
+
+# Enable AMQP if you want to receive metrics using your amqp broker
+ENABLE_AMQP = True
+
+# Verbose means a line will be logged for every metric received
+# useful for testing
+AMQP_VERBOSE = True
+
+# your credentials for the amqp server
+# AMQP_USER = guest
+# AMQP_PASSWORD = guest
+
+# the network settings for the amqp server
+# AMQP_HOST = localhost
+# AMQP_PORT = 5672
+
+# if you want to include the metric name as part of the message body
+# instead of as the routing key, set this to True
+# AMQP_METRIC_NAME_IN_BODY = False
+
+# NOTE: you cannot run both a cache and a relay on the same server
+# with the default configuration, you have to specify distinct
+# interfaces and ports for the listeners.
+
+[relay]
+LINE_RECEIVER_INTERFACE = 0.0.0.0
+LINE_RECEIVER_PORT = 2003
+
+PICKLE_RECEIVER_INTERFACE = 0.0.0.0
+PICKLE_RECEIVER_PORT = 2004
+
+CACHE_SERVERS = server1, server2, server3
+MAX_QUEUE_SIZE = 10000

+ 359 - 0
docker/blocks/graphite1/conf/opt/graphite/conf/carbon.conf

@@ -0,0 +1,359 @@
+[cache]
+# Configure carbon directories.
+#
+# OS environment variables can be used to tell carbon where graphite is
+# installed, where to read configuration from and where to write data.
+#
+#   GRAPHITE_ROOT        - Root directory of the graphite installation.
+#                          Defaults to ../
+#   GRAPHITE_CONF_DIR    - Configuration directory (where this file lives).
+#                          Defaults to $GRAPHITE_ROOT/conf/
+#   GRAPHITE_STORAGE_DIR - Storage directory for whisper/rrd/log/pid files.
+#                          Defaults to $GRAPHITE_ROOT/storage/
+#
+# To change other directory paths, add settings to this file. The following
+# configuration variables are available with these default values:
+#
+#   STORAGE_DIR    = $GRAPHITE_STORAGE_DIR
+#   LOCAL_DATA_DIR = STORAGE_DIR/whisper/
+#   WHITELISTS_DIR = STORAGE_DIR/lists/
+#   CONF_DIR       = STORAGE_DIR/conf/
+#   LOG_DIR        = STORAGE_DIR/log/
+#   PID_DIR        = STORAGE_DIR/
+#
+# For FHS style directory structures, use:
+#
+#   STORAGE_DIR    = /var/lib/carbon/
+#   CONF_DIR       = /etc/carbon/
+#   LOG_DIR        = /var/log/carbon/
+#   PID_DIR        = /var/run/
+#
+#LOCAL_DATA_DIR = /opt/graphite/storage/whisper/
+
+# Enable daily log rotation. If disabled, a kill -HUP can be used after a manual rotate
+ENABLE_LOGROTATION = True
+
+# Specify the user to drop privileges to
+# If this is blank carbon runs as the user that invokes it
+# This user must have write access to the local data directory
+USER =
+#
+# NOTE: The above settings must be set under [relay] and [aggregator]
+#       to take effect for those daemons as well
+
+# Limit the size of the cache to avoid swapping or becoming CPU bound.
+# Sorting and serving cache queries get more expensive as the cache grows.
+# Use the value "inf" (infinity) for an unlimited cache size.
+MAX_CACHE_SIZE = inf
+
+# Limits the number of whisper update_many() calls per second, which effectively
+# means the number of write requests sent to the disk. This is intended to
+# prevent over-utilizing the disk and thus starving the rest of the system.
+# When the rate of required updates exceeds this, then carbon's caching will
+# take effect and increase the overall throughput accordingly.
+MAX_UPDATES_PER_SECOND = 500
+
+# If defined, this changes the MAX_UPDATES_PER_SECOND in Carbon when a
+# stop/shutdown is initiated.  This helps when MAX_UPDATES_PER_SECOND is
+# relatively low and carbon has cached a lot of updates; it enables the carbon
+# daemon to shutdown more quickly.
+# MAX_UPDATES_PER_SECOND_ON_SHUTDOWN = 1000
+
+# Softly limits the number of whisper files that get created each minute.
+# Setting this value low (like at 50) is a good way to ensure your graphite
+# system will not be adversely impacted when a bunch of new metrics are
+# sent to it. The trade off is that it will take much longer for those metrics'
+# database files to all get created and thus longer until the data becomes usable.
+# Setting this value high (like "inf" for infinity) will cause graphite to create
+# the files quickly but at the risk of slowing I/O down considerably for a while.
+MAX_CREATES_PER_MINUTE = 50
+
+LINE_RECEIVER_INTERFACE = 0.0.0.0
+LINE_RECEIVER_PORT = 2003
+
+# Set this to True to enable the UDP listener. By default this is off
+# because it is very common to run multiple carbon daemons and managing
+# another (rarely used) port for every carbon instance is not fun.
+ENABLE_UDP_LISTENER = False
+UDP_RECEIVER_INTERFACE = 0.0.0.0
+UDP_RECEIVER_PORT = 2003
+
+PICKLE_RECEIVER_INTERFACE = 0.0.0.0
+PICKLE_RECEIVER_PORT = 2004
+
+# Set to false to disable logging of successful connections
+LOG_LISTENER_CONNECTIONS = True
+
+# Per security concerns outlined in Bug #817247 the pickle receiver
+# will use a more secure and slightly less efficient unpickler.
+# Set this to True to revert to the old-fashioned insecure unpickler.
+USE_INSECURE_UNPICKLER = False
+
+CACHE_QUERY_INTERFACE = 0.0.0.0
+CACHE_QUERY_PORT = 7002
+
+# Set this to False to drop datapoints received after the cache
+# reaches MAX_CACHE_SIZE. If this is True (the default) then sockets
+# over which metrics are received will temporarily stop accepting
+# data until the cache size falls below 95% MAX_CACHE_SIZE.
+USE_FLOW_CONTROL = True
+
+# By default, carbon-cache will log every whisper update and cache hit. This can be excessive and
+# degrade performance if logging on the same volume as the whisper data is stored.
+LOG_UPDATES = False
+LOG_CACHE_HITS = False
+LOG_CACHE_QUEUE_SORTS = True
+
+# The thread that writes metrics to disk can use one of the following strategies
+# determining the order in which metrics are removed from cache and flushed to
+# disk. The default option preserves the same behavior as has been historically
+# available in version 0.9.10.
+#
+# sorted - All metrics in the cache will be counted and an ordered list of
+# them will be sorted according to the number of datapoints in the cache at the
+# moment of the list's creation. Metrics will then be flushed from the cache to
+# disk in that order.
+#
+# max - The writer thread will always pop and flush the metric from cache
+# that has the most datapoints. This will give a strong flush preference to
+# frequently updated metrics and will also reduce random file-io. Infrequently
+# updated metrics may only ever be persisted to disk at daemon shutdown if
+# there are a large number of metrics which receive very frequent updates OR if
+# disk i/o is very slow.
+#
+# naive - Metrics will be flushed from the cache to disk in an unordered
+# fashion. This strategy may be desirable in situations where the storage for
+# whisper files is solid state, CPU resources are very limited or deference to
+# the OS's i/o scheduler is expected to compensate for the random write
+# pattern.
+#
+CACHE_WRITE_STRATEGY = sorted
+
+# On some systems it is desirable for whisper to write synchronously.
+# Set this option to True if you'd like to try this. Basically it will
+# shift the onus of buffering writes from the kernel into carbon's cache.
+WHISPER_AUTOFLUSH = False
+
+# By default new Whisper files are created pre-allocated with the data region
+# filled with zeros to prevent fragmentation and speed up contiguous reads and
+# writes (which are common). Enabling this option will cause Whisper to create
+# the file sparsely instead. Enabling this option may allow a large increase of
+# MAX_CREATES_PER_MINUTE but may have longer term performance implications
+# depending on the underlying storage configuration.
+# WHISPER_SPARSE_CREATE = False
+
+# Only beneficial on linux filesystems that support the fallocate system call.
+# It maintains the benefits of contiguous reads/writes, but with a potentially
+# much faster creation speed, by allowing the kernel to handle the block
+# allocation and zero-ing. Enabling this option may allow a large increase of
+# MAX_CREATES_PER_MINUTE. If enabled on an OS or filesystem that is unsupported
+# this option will gracefully fallback to standard POSIX file access methods.
+WHISPER_FALLOCATE_CREATE = True
+
+# Enabling this option will cause Whisper to lock each Whisper file it writes
+# to with an exclusive lock (LOCK_EX, see: man 2 flock). This is useful when
+# multiple carbon-cache daemons are writing to the same files
+# WHISPER_LOCK_WRITES = False
+
+# Set this to True to enable whitelisting and blacklisting of metrics in
+# CONF_DIR/whitelist and CONF_DIR/blacklist. If the whitelist is missing or
+# empty, all metrics will pass through
+# USE_WHITELIST = False
+
+# By default, carbon itself will log statistics (such as a count,
+# metricsReceived) with the top level prefix of 'carbon' at an interval of 60
+# seconds. Set CARBON_METRIC_INTERVAL to 0 to disable instrumentation
+# CARBON_METRIC_PREFIX = carbon
+# CARBON_METRIC_INTERVAL = 60
+
+# Enable AMQP if you want to receive metrics using an amqp broker
+# ENABLE_AMQP = False
+
+# Verbose means a line will be logged for every metric received
+# useful for testing
+# AMQP_VERBOSE = False
+
+# AMQP_HOST = localhost
+# AMQP_PORT = 5672
+# AMQP_VHOST = /
+# AMQP_USER = guest
+# AMQP_PASSWORD = guest
+# AMQP_EXCHANGE = graphite
+# AMQP_METRIC_NAME_IN_BODY = False
+
+# The manhole interface allows you to SSH into the carbon daemon
+# and get a python interpreter. BE CAREFUL WITH THIS! If you do
+# something like time.sleep() in the interpreter, the whole process
+# will sleep! This is *extremely* helpful in debugging, assuming
+# you are familiar with the code. If you are not, please don't
+# mess with this, you are asking for trouble :)
+#
+# ENABLE_MANHOLE = False
+# MANHOLE_INTERFACE = 127.0.0.1
+# MANHOLE_PORT = 7222
+# MANHOLE_USER = admin
+# MANHOLE_PUBLIC_KEY = ssh-rsa AAAAB3NzaC1yc2EAAAABiwAaAIEAoxN0sv/e4eZCPpi3N3KYvyzRaBaMeS2RsOQ/cDuKv11dlNzVeiyc3RFmCv5Rjwn/lQ79y0zyHxw67qLyhQ/kDzINc4cY41ivuQXm2tPmgvexdrBv5nsfEpjs3gLZfJnyvlcVyWK/lId8WUvEWSWHTzsbtmXAF2raJMdgLTbQ8wE=
+
+# Patterns for all of the metrics this machine will store. Read more at
+# http://en.wikipedia.org/wiki/Advanced_Message_Queuing_Protocol#Bindings
+#
+# Example: store all sales, linux servers, and utilization metrics
+# BIND_PATTERNS = sales.#, servers.linux.#, #.utilization
+#
+# Example: store everything
+# BIND_PATTERNS = #
+
+# To configure special settings for the carbon-cache instance 'b', uncomment this:
+#[cache:b]
+#LINE_RECEIVER_PORT = 2103
+#PICKLE_RECEIVER_PORT = 2104
+#CACHE_QUERY_PORT = 7102
+# and any other settings you want to customize, defaults are inherited
+# from [carbon] section.
+# You can then specify the --instance=b option to manage this instance
+
+
+
+[relay]
+LINE_RECEIVER_INTERFACE = 0.0.0.0
+LINE_RECEIVER_PORT = 2013
+PICKLE_RECEIVER_INTERFACE = 0.0.0.0
+PICKLE_RECEIVER_PORT = 2014
+
+# Set to false to disable logging of successful connections
+LOG_LISTENER_CONNECTIONS = True
+
+# Carbon-relay has several options for metric routing controlled by RELAY_METHOD
+#
+# Use relay-rules.conf to route metrics to destinations based on pattern rules
+#RELAY_METHOD = rules
+#
+# Use consistent-hashing for even distribution of metrics between destinations
+#RELAY_METHOD = consistent-hashing
+#
+# Use consistent-hashing but take into account an aggregation-rules.conf shared
+# by downstream carbon-aggregator daemons. This will ensure that all metrics
+# that map to a given aggregation rule are sent to the same carbon-aggregator
+# instance.
+# Enable this for carbon-relays that send to a group of carbon-aggregators
+#RELAY_METHOD = aggregated-consistent-hashing
+RELAY_METHOD = rules
+
+# If you use consistent-hashing you can add redundancy by replicating every
+# datapoint to more than one machine.
+REPLICATION_FACTOR = 1
+
+# This is a list of carbon daemons we will send any relayed or
+# generated metrics to. The default provided would send to a single
+# carbon-cache instance on the default port. However if you
+# use multiple carbon-cache instances then it would look like this:
+#
+# DESTINATIONS = 127.0.0.1:2004:a, 127.0.0.1:2104:b
+#
+# The general form is IP:PORT:INSTANCE where the :INSTANCE part is
+# optional and refers to the "None" instance if omitted.
+#
+# Note that if the destinations are all carbon-caches then this should
+# exactly match the webapp's CARBONLINK_HOSTS setting in terms of
+# instances listed (order matters!).
+#
+# If using RELAY_METHOD = rules, all destinations used in relay-rules.conf
+# must be defined in this list
+DESTINATIONS = 127.0.0.1:2004
+
+# This defines the maximum "message size" between carbon daemons.
+# You shouldn't need to tune this unless you really know what you're doing.
+MAX_DATAPOINTS_PER_MESSAGE = 500
+MAX_QUEUE_SIZE = 10000
+
+# Set this to False to drop datapoints when any send queue (sending datapoints
+# to a downstream carbon daemon) hits MAX_QUEUE_SIZE. If this is True (the
+# default) then sockets over which metrics are received will temporarily stop accepting
+# data until the send queues fall below 80% MAX_QUEUE_SIZE.
+USE_FLOW_CONTROL = True
+
+# Set this to True to enable whitelisting and blacklisting of metrics in
+# CONF_DIR/whitelist and CONF_DIR/blacklist. If the whitelist is missing or
+# empty, all metrics will pass through
+# USE_WHITELIST = False
+
+# By default, carbon itself will log statistics (such as a count,
+# metricsReceived) with the top level prefix of 'carbon' at an interval of 60
+# seconds. Set CARBON_METRIC_INTERVAL to 0 to disable instrumentation
+# CARBON_METRIC_PREFIX = carbon
+# CARBON_METRIC_INTERVAL = 60
+
+
+[aggregator]
+LINE_RECEIVER_INTERFACE = 0.0.0.0
+LINE_RECEIVER_PORT = 2023
+
+PICKLE_RECEIVER_INTERFACE = 0.0.0.0
+PICKLE_RECEIVER_PORT = 2024
+
+# Set to false to disable logging of successful connections
+LOG_LISTENER_CONNECTIONS = True
+
+# If set true, metric received will be forwarded to DESTINATIONS in addition to
+# the output of the aggregation rules. If set false the carbon-aggregator will
+# only ever send the output of aggregation.
+FORWARD_ALL = True
+
+# This is a list of carbon daemons we will send any relayed or
+# generated metrics to. The default provided would send to a single
+# carbon-cache instance on the default port. However if you
+# use multiple carbon-cache instances then it would look like this:
+#
+# DESTINATIONS = 127.0.0.1:2004:a, 127.0.0.1:2104:b
+#
+# The format is comma-delimited IP:PORT:INSTANCE where the :INSTANCE part is
+# optional and refers to the "None" instance if omitted.
+#
+# Note that if the destinations are all carbon-caches then this should
+# exactly match the webapp's CARBONLINK_HOSTS setting in terms of
+# instances listed (order matters!).
+DESTINATIONS = 127.0.0.1:2004
+
+# If you want to add redundancy to your data by replicating every
+# datapoint to more than one machine, increase this.
+REPLICATION_FACTOR = 1
+
+# This is the maximum number of datapoints that can be queued up
+# for a single destination. Once this limit is hit, we will
+# stop accepting new data if USE_FLOW_CONTROL is True, otherwise
+# we will drop any subsequently received datapoints.
+MAX_QUEUE_SIZE = 10000
+
+# Set this to False to drop datapoints when any send queue (sending datapoints
+# to a downstream carbon daemon) hits MAX_QUEUE_SIZE. If this is True (the
+# default) then sockets over which metrics are received will temporarily stop accepting
+# data until the send queues fall below 80% MAX_QUEUE_SIZE.
+USE_FLOW_CONTROL = True
+
+# This defines the maximum "message size" between carbon daemons.
+# You shouldn't need to tune this unless you really know what you're doing.
+MAX_DATAPOINTS_PER_MESSAGE = 500
+
+# This defines how many datapoints the aggregator remembers for
+# each metric. Aggregation only happens for datapoints that fall in
+# the past MAX_AGGREGATION_INTERVALS * intervalSize seconds.
+MAX_AGGREGATION_INTERVALS = 5
+
+# By default (WRITE_BACK_FREQUENCY = 0), carbon-aggregator will write back
+# aggregated data points once every rule.frequency seconds, on a per-rule basis.
+# Set this (WRITE_BACK_FREQUENCY = N) to write back all aggregated data points
+# every N seconds, independent of rule frequency. This is useful, for example,
+# to be able to query partially aggregated metrics from carbon-cache without
+# having to first wait rule.frequency seconds.
+# WRITE_BACK_FREQUENCY = 0
+
+# Set this to True to enable whitelisting and blacklisting of metrics in
+# CONF_DIR/whitelist and CONF_DIR/blacklist. If the whitelist is missing or
+# empty, all metrics will pass through
+# USE_WHITELIST = False
+
+# By default, carbon itself will log statistics (such as a count,
+# metricsReceived) with the top level prefix of 'carbon' at an interval of 60
+# seconds. Set CARBON_METRIC_INTERVAL to 0 to disable instrumentation
+# CARBON_METRIC_PREFIX = carbon
+# CARBON_METRIC_INTERVAL = 60

+ 57 - 0
docker/blocks/graphite1/conf/opt/graphite/conf/dashboard.conf

@@ -0,0 +1,57 @@
+# This configuration file controls the behavior of the Dashboard UI, available
+# at http://my-graphite-server/dashboard/.
+#
+# This file must contain a [ui] section that defines values for all of the
+# following settings.
+[ui]
+default_graph_width = 400
+default_graph_height = 250
+automatic_variants = true
+refresh_interval = 60
+autocomplete_delay = 375
+merge_hover_delay = 750
+
+# You can set this 'default', 'white', or a custom theme name.
+# To create a custom theme, copy the dashboard-default.css file
+# to dashboard-myThemeName.css in the content/css directory and
+# modify it to your liking.
+theme = default
+
+[keyboard-shortcuts]
+toggle_toolbar = ctrl-z
+toggle_metrics_panel = ctrl-space
+erase_all_graphs = alt-x
+save_dashboard = alt-s
+completer_add_metrics = alt-enter
+completer_del_metrics = alt-backspace
+give_completer_focus = shift-space
+
+# These settings apply to the UI as a whole, all other sections in this file
+# pertain only to specific metric types.
+#
+# The dashboard presents only metrics that fall into specified naming schemes
+# defined in this file. This creates a simpler, more targeted view of the
+# data. The general form for defining a naming scheme is as follows:
+#
+#[Metric Type]
+#scheme = basis.path.<field1>.<field2>.<fieldN>
+#field1.label = Foo
+#field2.label = Bar
+#
+#
+# Where each <field> will be displayed as a dropdown box
+# in the UI and the remaining portion of the namespace
+# shown in the Metric Selector panel. The .label options set the labels
+# displayed for each dropdown.
+#
+# For example:
+#
+#[Sales]
+#scheme = sales.<channel>.<type>.<brand>
+#channel.label = Channel
+#type.label = Product Type
+#brand.label = Brand
+#
+# This defines a 'Sales' metric type that uses 3 dropdowns in the Context Selector
+# (the upper-left panel) while any deeper metrics (per-product counts or revenue, etc)
+# will be available in the Metric Selector (upper-right panel).

+ 38 - 0
docker/blocks/graphite1/conf/opt/graphite/conf/graphTemplates.conf

@@ -0,0 +1,38 @@
+[default]
+background = black
+foreground = white
+majorLine = white
+minorLine = grey
+lineColors = blue,green,red,purple,brown,yellow,aqua,grey,magenta,pink,gold,rose
+fontName = Sans
+fontSize = 10
+fontBold = False
+fontItalic = False
+
+[noc]
+background = black
+foreground = white
+majorLine = white
+minorLine = grey
+lineColors = blue,green,red,yellow,purple,brown,aqua,grey,magenta,pink,gold,rose
+fontName = Sans
+fontSize = 10
+fontBold = False
+fontItalic = False
+
+[plain]
+background = white
+foreground = black
+minorLine = grey
+majorLine = rose
+
+[summary]
+background = black
+lineColors = #6666ff, #66ff66, #ff6666
+
+[alphas]
+background = white
+foreground = black
+majorLine = grey
+minorLine = rose
+lineColors = 00ff00aa,ff000077,00337799

+ 21 - 0
docker/blocks/graphite1/conf/opt/graphite/conf/relay-rules.conf

@@ -0,0 +1,21 @@
+# Relay destination rules for carbon-relay. Entries are scanned in order,
+# and the first pattern a metric matches will cause processing to cease after sending
+# unless `continue` is set to true
+#
+#  [name]
+#  pattern = <regex>
+#  destinations = <list of destination addresses>
+#  continue = <boolean>  # default: False
+#
+#  name: Arbitrary unique name to identify the rule
+#  pattern: Regex pattern to match against the metric name
+#  destinations: Comma-separated list of destinations.
+#    ex: 127.0.0.1, 10.1.2.3:2004, 10.1.2.4:2004:a, myserver.mydomain.com
+#  continue: Continue processing rules if this rule matches (default: False)
+
+# You must have exactly one section with 'default = true'
+# Note that all destinations listed must also exist in carbon.conf
+# in the DESTINATIONS setting in the [relay] section
+[default]
+default = true
+destinations = 127.0.0.1:2004:a, 127.0.0.1:2104:b

+ 18 - 0
docker/blocks/graphite1/conf/opt/graphite/conf/rewrite-rules.conf

@@ -0,0 +1,18 @@
+# This file defines regular expression patterns that can be used to
+# rewrite metric names in a search & replace fashion. It consists of two
+# sections, [pre] and [post]. The rules in the pre section are applied to
+# metric names as soon as they are received. The post rules are applied
+# after aggregation has taken place.
+#
+# The general form of each rule is as follows:
+#
+# regex-pattern = replacement-text
+#
+# For example:
+#
+# [post]
+# _sum$ =
+# _avg$ =
+#
+# These rules would strip off a suffix of _sum or _avg from any metric names
+# after aggregation.

+ 43 - 0
docker/blocks/graphite1/conf/opt/graphite/conf/storage-aggregation.conf

@@ -0,0 +1,43 @@
+# Aggregation methods for whisper files. Entries are scanned in order,
+# and first match wins. This file is scanned for changes every 60 seconds
+#
+#  [name]
+#  pattern = <regex>
+#  xFilesFactor = <float between 0 and 1>
+#  aggregationMethod = <average|sum|last|max|min>
+#
+#  name: Arbitrary unique name for the rule
+#  pattern: Regex pattern to match against the metric name
+#  xFilesFactor: Ratio of valid data points required for aggregation to the next retention to occur
+#  aggregationMethod: function to apply to data points for aggregation
+#
+[min]
+pattern = \.lower$
+xFilesFactor = 0.1
+aggregationMethod = min
+
+[max]
+pattern = \.upper(_\d+)?$
+xFilesFactor = 0.1
+aggregationMethod = max
+
+[sum]
+pattern = \.sum$
+xFilesFactor = 0
+aggregationMethod = sum
+
+[count]
+pattern = \.count$
+xFilesFactor = 0
+aggregationMethod = sum
+
+[count_legacy]
+pattern = ^stats_counts.*
+xFilesFactor = 0
+aggregationMethod = sum
+
+[default_average]
+pattern = .*
+xFilesFactor = 0.3
+aggregationMethod = average
+

+ 17 - 0
docker/blocks/graphite1/conf/opt/graphite/conf/storage-schemas.conf

@@ -0,0 +1,17 @@
+# Schema definitions for Whisper files. Entries are scanned in order,
+[carbon]
+pattern = ^carbon\..*
+retentions = 1m:31d,10m:1y,1h:5y
+
+[highres]
+pattern = ^highres.*
+retentions = 1s:1d,1m:7d
+
+[statsd]
+pattern = ^statsd.*
+retentions = 1m:7d,10m:1y
+
+[default]
+pattern = .*
+retentions = 10s:1d,1m:7d,10m:1y
+

+ 6 - 0
docker/blocks/graphite1/conf/opt/graphite/conf/whitelist.conf

@@ -0,0 +1,6 @@
+# This file takes a single regular expression per line
+# If USE_WHITELIST is set to True in carbon.conf, only metrics received which
+# match one of these expressions will be persisted. If this file is empty or
+# missing, all metrics will pass through.
+# This file is reloaded automatically when changes are made
+.*

+ 94 - 0
docker/blocks/graphite1/conf/opt/graphite/webapp/graphite/app_settings.py

@@ -0,0 +1,94 @@
+"""Copyright 2008 Orbitz WorldWide
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License."""
+
+# Django settings for graphite project.
+# DO NOT MODIFY THIS FILE DIRECTLY - use local_settings.py instead
+from os.path import dirname, join, abspath
+
+
+#Django settings below, do not touch!
+APPEND_SLASH = False
+TEMPLATE_DEBUG = False
+
+TEMPLATES = [
+    {
+        'BACKEND': 'django.template.backends.django.DjangoTemplates',
+        'DIRS': [
+            join(dirname( abspath(__file__) ), 'templates')
+        ],
+        'APP_DIRS': True,
+        'OPTIONS': {
+            'context_processors': [
+                # Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this
+                # list if you haven't customized them:
+                'django.contrib.auth.context_processors.auth',
+                'django.template.context_processors.debug',
+                'django.template.context_processors.i18n',
+                'django.template.context_processors.media',
+                'django.template.context_processors.static',
+                'django.template.context_processors.tz',
+                'django.contrib.messages.context_processors.messages',
+            ],
+        },
+    },
+]
+
+# Language code for this installation. All choices can be found here:
+# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
+# http://blogs.law.harvard.edu/tech/stories/storyReader$15
+LANGUAGE_CODE = 'en-us'
+
+# Absolute path to the directory that holds media.
+MEDIA_ROOT = ''
+
+# URL that handles the media served from MEDIA_ROOT.
+# Example: "http://media.lawrence.com"
+MEDIA_URL = ''
+
+MIDDLEWARE_CLASSES = (
+  'graphite.middleware.LogExceptionsMiddleware',
+  'django.middleware.common.CommonMiddleware',
+  'django.middleware.gzip.GZipMiddleware',
+  'django.contrib.sessions.middleware.SessionMiddleware',
+  'django.contrib.auth.middleware.AuthenticationMiddleware',
+  'django.contrib.messages.middleware.MessageMiddleware',
+)
+
+ROOT_URLCONF = 'graphite.urls'
+
+INSTALLED_APPS = (
+  'graphite.metrics',
+  'graphite.render',
+  'graphite.browser',
+  'graphite.composer',
+  'graphite.account',
+  'graphite.dashboard',
+  'graphite.whitelist',
+  'graphite.events',
+  'graphite.url_shortener',
+  'django.contrib.auth',
+  'django.contrib.sessions',
+  'django.contrib.admin',
+  'django.contrib.contenttypes',
+  'django.contrib.staticfiles',
+  'tagging',
+)
+
+AUTHENTICATION_BACKENDS = ['django.contrib.auth.backends.ModelBackend']
+
+GRAPHITE_WEB_APP_SETTINGS_LOADED = True
+
+STATIC_URL = '/static/'
+
+STATIC_ROOT = '/opt/graphite/static/'

+ 215 - 0
docker/blocks/graphite1/conf/opt/graphite/webapp/graphite/local_settings.py

@@ -0,0 +1,215 @@
+## Graphite local_settings.py
+# Edit this file to customize the default Graphite webapp settings
+#
+# Additional customizations to Django settings can be added to this file as well
+
+#####################################
+# General Configuration #
+#####################################
+# Set this to a long, random unique string to use as a secret key for this
+# install. This key is used for salting of hashes used in auth tokens,
+# CSRF middleware, cookie storage, etc. This should be set identically among
+# instances if used behind a load balancer.
+#SECRET_KEY = 'UNSAFE_DEFAULT'
+
+# In Django 1.5+ set this to the list of hosts your graphite instances is
+# accessible as. See:
+# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-ALLOWED_HOSTS
+#ALLOWED_HOSTS = [ '*' ]
+
+# Set your local timezone (Django's default is America/Chicago)
+# If your graphs appear to be offset by a couple hours then this probably
+# needs to be explicitly set to your local timezone.
+#TIME_ZONE = 'America/Los_Angeles'
+
+# Override this to provide documentation specific to your Graphite deployment
+#DOCUMENTATION_URL = "http://graphite.readthedocs.org/"
+
+# Logging
+#LOG_RENDERING_PERFORMANCE = True
+#LOG_CACHE_PERFORMANCE = True
+#LOG_METRIC_ACCESS = True
+
+# Enable full debug page display on exceptions (Internal Server Error pages)
+#DEBUG = True
+
+# If using RRD files and rrdcached, set to the address or socket of the daemon
+#FLUSHRRDCACHED = 'unix:/var/run/rrdcached.sock'
+
+# This lists the memcached servers that will be used by this webapp.
+# If you have a cluster of webapps you should ensure all of them
+# have the *exact* same value for this setting. That will maximize cache
+# efficiency. Setting MEMCACHE_HOSTS to be empty will turn off use of
+# memcached entirely.
+#
+# You should not use the loopback address (127.0.0.1) here if using clustering
+# as every webapp in the cluster should use the exact same values to prevent
+# unneeded cache misses. Set to [] to disable caching of images and fetched data
+#MEMCACHE_HOSTS = ['10.10.10.10:11211', '10.10.10.11:11211', '10.10.10.12:11211']
+#DEFAULT_CACHE_DURATION = 60 # Cache images and data for 1 minute
+
+
+#####################################
+# Filesystem Paths #
+#####################################
+# Change only GRAPHITE_ROOT if your install is merely shifted from /opt/graphite
+# to somewhere else
+#GRAPHITE_ROOT = '/opt/graphite'
+
+# Most installs done outside of a separate tree such as /opt/graphite will only
+# need to change these three settings. Note that the default settings for each
+# of these is relative to GRAPHITE_ROOT
+#CONF_DIR = '/opt/graphite/conf'
+#STORAGE_DIR = '/opt/graphite/storage'
+#CONTENT_DIR = '/opt/graphite/webapp/content'
+
+# To further or fully customize the paths, modify the following. Note that the
+# default settings for each of these are relative to CONF_DIR and STORAGE_DIR
+#
+## Webapp config files
+#DASHBOARD_CONF = '/opt/graphite/conf/dashboard.conf'
+#GRAPHTEMPLATES_CONF = '/opt/graphite/conf/graphTemplates.conf'
+
+## Data directories
+# NOTE: If any directory is unreadable in DATA_DIRS it will break metric browsing
+#WHISPER_DIR = '/opt/graphite/storage/whisper'
+#RRD_DIR = '/opt/graphite/storage/rrd'
+#DATA_DIRS = [WHISPER_DIR, RRD_DIR] # Default: set from the above variables
+#LOG_DIR = '/opt/graphite/storage/log/webapp'
+#INDEX_FILE = '/opt/graphite/storage/index'  # Search index file
+
+
+#####################################
+# Email Configuration #
+#####################################
+# This is used for emailing rendered Graphs
+# Default backend is SMTP
+#EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
+#EMAIL_HOST = 'localhost'
+#EMAIL_PORT = 25
+#EMAIL_HOST_USER = ''
+#EMAIL_HOST_PASSWORD = ''
+#EMAIL_USE_TLS = False
+# To drop emails on the floor, enable the Dummy backend:
+#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
+
+
+#####################################
+# Authentication Configuration #
+#####################################
+## LDAP / ActiveDirectory authentication setup
+#USE_LDAP_AUTH = True
+#LDAP_SERVER = "ldap.mycompany.com"
+#LDAP_PORT = 389
+#	OR
+#LDAP_URI = "ldaps://ldap.mycompany.com:636"
+#LDAP_SEARCH_BASE = "OU=users,DC=mycompany,DC=com"
+#LDAP_BASE_USER = "CN=some_readonly_account,DC=mycompany,DC=com"
+#LDAP_BASE_PASS = "readonly_account_password"
+#LDAP_USER_QUERY = "(username=%s)"  #For Active Directory use "(sAMAccountName=%s)"
+#
+# If you want to further customize the ldap connection options you should
+# directly use ldap.set_option to set the ldap module's global options.
+# For example:
+#
+#import ldap
+#ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_ALLOW)
+#ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, "/etc/ssl/ca")
+#ldap.set_option(ldap.OPT_X_TLS_CERTFILE, "/etc/ssl/mycert.pem")
+#ldap.set_option(ldap.OPT_X_TLS_KEYFILE, "/etc/ssl/mykey.pem")
+# See http://www.python-ldap.org/ for further details on these options.
+
+## REMOTE_USER authentication. See: https://docs.djangoproject.com/en/dev/howto/auth-remote-user/
+#USE_REMOTE_USER_AUTHENTICATION = True
+
+# Override the URL for the login link (e.g. for django_openid_auth)
+#LOGIN_URL = '/account/login'
+
+
+##########################
+# Database Configuration #
+##########################
+# By default sqlite is used. If you cluster multiple webapps you will need
+# to setup an external database (such as MySQL) and configure all of the webapp
+# instances to use the same database. Note that this database is only used to store
+# Django models such as saved graphs, dashboards, user preferences, etc.
+# Metric data is not stored here.
+#
+# DO NOT FORGET TO RUN 'manage.py syncdb' AFTER SETTING UP A NEW DATABASE
+#
+# The following built-in database engines are available:
+#  django.db.backends.postgresql          # Removed in Django 1.4
+#  django.db.backends.postgresql_psycopg2
+#  django.db.backends.mysql
+#  django.db.backends.sqlite3
+#  django.db.backends.oracle
+#
+# The default is 'django.db.backends.sqlite3' with file 'graphite.db'
+# located in STORAGE_DIR
+#
+#DATABASES = {
+#    'default': {
+#        'NAME': '/opt/graphite/storage/graphite.db',
+#        'ENGINE': 'django.db.backends.sqlite3',
+#        'USER': '',
+#        'PASSWORD': '',
+#        'HOST': '',
+#        'PORT': ''
+#    }
+#}
+#
+
+
+#########################
+# Cluster Configuration #
+#########################
+# (To avoid excessive DNS lookups you want to stick to using IP addresses only in this entire section)
+#
+# This should list the IP address (and optionally port) of the webapp on each
+# remote server in the cluster. These servers must each have local access to
+# metric data. Note that the first server to return a match for a query will be
+# used.
+#CLUSTER_SERVERS = ["10.0.2.2:80", "10.0.2.3:80"]
+
+## These are timeout values (in seconds) for requests to remote webapps
+#REMOTE_STORE_FETCH_TIMEOUT = 6   # Timeout to fetch series data
+#REMOTE_STORE_FIND_TIMEOUT = 2.5  # Timeout for metric find requests
+#REMOTE_STORE_RETRY_DELAY = 60    # Time before retrying a failed remote webapp
+#REMOTE_FIND_CACHE_DURATION = 300 # Time to cache remote metric find results
+
+## Remote rendering settings
+# Set to True to enable rendering of Graphs on a remote webapp
+#REMOTE_RENDERING = True
+# List of IP (and optionally port) of the webapp on each remote server that
+# will be used for rendering. Note that each rendering host should have local
+# access to metric data or should have CLUSTER_SERVERS configured
+#RENDERING_HOSTS = []
+#REMOTE_RENDER_CONNECT_TIMEOUT = 1.0
+
+# If you are running multiple carbon-caches on this machine (typically behind a relay using
+# consistent hashing), you'll need to list the ip address, cache query port, and instance name of each carbon-cache
+# instance on the local machine (NOT every carbon-cache in the entire cluster). The default cache query port is 7002
+# and a common scheme is to use 7102 for instance b, 7202 for instance c, etc.
+#
+# You *should* use 127.0.0.1 here in most cases
+#CARBONLINK_HOSTS = ["127.0.0.1:7002:a", "127.0.0.1:7102:b", "127.0.0.1:7202:c"]
+#CARBONLINK_TIMEOUT = 1.0
+
+#####################################
+# Additional Django Settings #
+#####################################
+# Uncomment the following line for direct access to Django settings such as
+# MIDDLEWARE_CLASSES or APPS
+#from graphite.app_settings import *
+
+import os
+
+LOG_DIR = '/var/log/graphite'
+SECRET_KEY = '$(date +%s | sha256sum | base64 | head -c 64)'
+
+if (os.getenv("MEMCACHE_HOST") is not None):
+    MEMCACHE_HOSTS = os.getenv("MEMCACHE_HOST").split(",")
+
+if (os.getenv("DEFAULT_CACHE_DURATION") is not None):
+    DEFAULT_CACHE_DURATION = int(os.getenv("DEFAULT_CACHE_DURATION"))
+

+ 6 - 0
docker/blocks/graphite1/conf/opt/statsd/config.js

@@ -0,0 +1,6 @@
+{
+  "graphiteHost": "127.0.0.1",
+  "graphitePort": 2003,
+  "port": 8125,
+  "flushInterval": 10000
+}

+ 26 - 0
docker/blocks/graphite1/conf/usr/local/bin/django_admin_init.exp

@@ -0,0 +1,26 @@
+#!/usr/bin/env expect
+
+set timeout -1
+spawn /usr/local/bin/manage.sh
+
+expect "Would you like to create one now" {
+  send "yes\r"
+}
+
+expect "Username" {
+  send "root\r"
+}
+
+expect "Email address:" {
+  send "root.graphite@mailinator.com\r"
+}
+
+expect "Password:" {
+  send "root\r"
+}
+
+expect "Password *:" {
+  send "root\r"
+}
+
+expect "Superuser created successfully"

+ 3 - 0
docker/blocks/graphite1/conf/usr/local/bin/manage.sh

@@ -0,0 +1,3 @@
+#!/bin/bash
+PYTHONPATH=/opt/graphite/webapp django-admin.py syncdb --settings=graphite.settings
+PYTHONPATH=/opt/graphite/webapp django-admin.py update_users --settings=graphite.settings

+ 16 - 0
docker/blocks/graphite1/fig

@@ -0,0 +1,16 @@
+graphite:
+  build: blocks/graphite1
+  ports:
+    - "8080:80"
+    - "2003:2003"
+  volumes:
+    - /etc/localtime:/etc/localtime:ro
+    - /etc/timezone:/etc/timezone:ro
+
+fake-graphite-data:
+  image: grafana/fake-data-gen
+  net: bridge
+  environment:
+    FD_DATASOURCE: graphite
+    FD_PORT: 2003
+

+ 76 - 0
docker/blocks/graphite1/files/carbon.conf

@@ -0,0 +1,76 @@
+[cache]
+LOCAL_DATA_DIR = /opt/graphite/storage/whisper/
+
+# Specify the user to drop privileges to
+# If this is blank carbon runs as the user that invokes it
+# This user must have write access to the local data directory
+USER =
+
+# Limit the size of the cache to avoid swapping or becoming CPU bound.
+# Sorts and serving cache queries gets more expensive as the cache grows.
+# Use the value "inf" (infinity) for an unlimited cache size.
+MAX_CACHE_SIZE = inf
+
+# Limits the number of whisper update_many() calls per second, which effectively
+# means the number of write requests sent to the disk. This is intended to
+# prevent over-utilizing the disk and thus starving the rest of the system.
+# When the rate of required updates exceeds this, then carbon's caching will
+# take effect and increase the overall throughput accordingly.
+MAX_UPDATES_PER_SECOND = 1000
+
+# Softly limits the number of whisper files that get created each minute.
+# Setting this value low (like at 50) is a good way to ensure your graphite
+# system will not be adversely impacted when a bunch of new metrics are
+# sent to it. The trade off is that it will take much longer for those metrics'
+# database files to all get created and thus longer until the data becomes usable.
+# Setting this value high (like "inf" for infinity) will cause graphite to create
+# the files quickly but at the risk of slowing I/O down considerably for a while.
+MAX_CREATES_PER_MINUTE = inf
+
+LINE_RECEIVER_INTERFACE = 0.0.0.0
+LINE_RECEIVER_PORT = 2003
+
+PICKLE_RECEIVER_INTERFACE = 0.0.0.0
+PICKLE_RECEIVER_PORT = 2004
+
+CACHE_QUERY_INTERFACE = 0.0.0.0
+CACHE_QUERY_PORT = 7002
+
+LOG_UPDATES = False
+
+# Enable AMQP if you want to receve metrics using an amqp broker
+# ENABLE_AMQP = False
+
+# Verbose means a line will be logged for every metric received
+# useful for testing
+# AMQP_VERBOSE = False
+
+# AMQP_HOST = localhost
+# AMQP_PORT = 5672
+# AMQP_VHOST = /
+# AMQP_USER = guest
+# AMQP_PASSWORD = guest
+# AMQP_EXCHANGE = graphite
+
+# Patterns for all of the metrics this machine will store. Read more at
+# http://en.wikipedia.org/wiki/Advanced_Message_Queuing_Protocol#Bindings
+#
+# Example: store all sales, linux servers, and utilization metrics
+# BIND_PATTERNS = sales.#, servers.linux.#, #.utilization
+#
+# Example: store everything
+# BIND_PATTERNS = #
+
+# NOTE: you cannot run both a cache and a relay on the same server
+# with the default configuration; you have to specify distinct
+# interfaces and ports for the listeners.
+
+[relay]
+LINE_RECEIVER_INTERFACE = 0.0.0.0
+LINE_RECEIVER_PORT = 2003
+
+PICKLE_RECEIVER_INTERFACE = 0.0.0.0
+PICKLE_RECEIVER_PORT = 2004
+
+CACHE_SERVERS = server1, server2, server3
+MAX_QUEUE_SIZE = 10000

+ 102 - 0
docker/blocks/graphite1/files/events_views.py

@@ -0,0 +1,102 @@
+import datetime
+import time
+
+from django.utils.timezone import get_current_timezone
+from django.core.urlresolvers import get_script_prefix
+from django.http import HttpResponse
+from django.shortcuts import render_to_response, get_object_or_404
+from pytz import timezone
+
+from graphite.util import json
+from graphite.events import models
+from graphite.render.attime import parseATTime
+
+
+def to_timestamp(dt):
+    return time.mktime(dt.timetuple())
+
+
+class EventEncoder(json.JSONEncoder):
+    def default(self, obj):
+        if isinstance(obj, datetime.datetime):
+            return to_timestamp(obj)
+        return json.JSONEncoder.default(self, obj)
+
+
+def view_events(request):
+    if request.method == "GET":
+        context = { 'events' : fetch(request),
+            'slash' : get_script_prefix()
+        }
+        return render_to_response("events.html", context)
+    else:
+        return post_event(request)
+
+def detail(request, event_id):
+    e = get_object_or_404(models.Event, pk=event_id)
+    context = { 'event' : e,
+       'slash' : get_script_prefix()
+    }
+    return render_to_response("event.html", context)
+
+
+def post_event(request):
+    if request.method == 'POST':
+        event = json.loads(request.body)
+        assert isinstance(event, dict)
+
+        values = {}
+        values["what"] = event["what"]
+        values["tags"] = event.get("tags", None)
+        values["when"] = datetime.datetime.fromtimestamp(
+            event.get("when", time.time()))
+        if "data" in event:
+            values["data"] = event["data"]
+
+        e = models.Event(**values)
+        e.save()
+
+        return HttpResponse(status=200)
+    else:
+        return HttpResponse(status=405)
+
+def get_data(request):
+    if 'jsonp' in request.REQUEST:
+        response = HttpResponse(
+          "%s(%s)" % (request.REQUEST.get('jsonp'),
+              json.dumps(fetch(request), cls=EventEncoder)),
+          mimetype='text/javascript')
+    else:
+        response = HttpResponse(
+            json.dumps(fetch(request), cls=EventEncoder),
+            mimetype="application/json")
+    return response
+
+def fetch(request):
+    #XXX we need to move to USE_TZ=True to get rid of naive-time conversions
+    def make_naive(dt):
+      if 'tz' in request.GET:
+        tz = timezone(request.GET['tz'])
+      else:
+        tz = get_current_timezone()
+      local_dt = dt.astimezone(tz)
+      if hasattr(local_dt, 'normalize'):
+        local_dt = local_dt.normalize()
+      return local_dt.replace(tzinfo=None)
+
+    if request.GET.get("from", None) is not None:
+        time_from = make_naive(parseATTime(request.GET["from"]))
+    else:
+        time_from = datetime.datetime.fromtimestamp(0)
+
+    if request.GET.get("until", None) is not None:
+        time_until = make_naive(parseATTime(request.GET["until"]))
+    else:
+        time_until = datetime.datetime.now()
+
+    tags = request.GET.get("tags", None)
+    if tags is not None:
+        tags = request.GET.get("tags").split(" ")
+
+    return [x.as_dict() for x in
+            models.Event.find_events(time_from, time_until, tags=tags)]

+ 20 - 0
docker/blocks/graphite1/files/initial_data.json

@@ -0,0 +1,20 @@
+[
+  {
+    "pk": 1,
+    "model": "auth.user",
+    "fields": {
+      "username": "admin",
+      "first_name": "",
+      "last_name": "",
+      "is_active": true,
+      "is_superuser": true,
+      "is_staff": true,
+      "last_login": "2011-09-20 17:02:14",
+      "groups": [],
+      "user_permissions": [],
+      "password": "sha1$1b11b$edeb0a67a9622f1f2cfeabf9188a711f5ac7d236",
+      "email": "root@example.com",
+      "date_joined": "2011-09-20 17:02:14"
+    }
+  }
+]

+ 42 - 0
docker/blocks/graphite1/files/local_settings.py

@@ -0,0 +1,42 @@
+# Edit this file to override the default graphite settings, do not edit settings.py
+
+# Turn on debugging and restart apache if you ever see an "Internal Server Error" page
+#DEBUG = True
+
+# Set your local timezone (django will try to figure this out automatically)
+TIME_ZONE = 'UTC'
+
+# Setting MEMCACHE_HOSTS to be empty will turn off use of memcached entirely
+#MEMCACHE_HOSTS = ['127.0.0.1:11211']
+
+# Sometimes you need to do a lot of rendering work but cannot share your storage mount
+#REMOTE_RENDERING = True
+#RENDERING_HOSTS = ['fastserver01','fastserver02']
+#LOG_RENDERING_PERFORMANCE = True
+#LOG_CACHE_PERFORMANCE = True
+
+# If you've got more than one backend server they should all be listed here
+#CLUSTER_SERVERS = []
+
+# Override this if you need to provide documentation specific to your graphite deployment
+#DOCUMENTATION_URL = "http://wiki.mycompany.com/graphite"
+
+# Enable email-related features
+#SMTP_SERVER = "mail.mycompany.com"
+
+# LDAP / ActiveDirectory authentication setup
+#USE_LDAP_AUTH = True
+#LDAP_SERVER = "ldap.mycompany.com"
+#LDAP_PORT = 389
+#LDAP_SEARCH_BASE = "OU=users,DC=mycompany,DC=com"
+#LDAP_BASE_USER = "CN=some_readonly_account,DC=mycompany,DC=com"
+#LDAP_BASE_PASS = "readonly_account_password"
+#LDAP_USER_QUERY = "(username=%s)"  #For Active Directory use "(sAMAccountName=%s)"
+
+# If sqlite won't cut it, configure your real database here (don't forget to run manage.py syncdb!)
+#DATABASE_ENGINE = 'mysql' # or 'postgres'
+#DATABASE_NAME = 'graphite'
+#DATABASE_USER = 'graphite'
+#DATABASE_PASSWORD = 'graphite-is-awesome'
+#DATABASE_HOST = 'mysql.mycompany.com'
+#DATABASE_PORT = '3306'

+ 1 - 0
docker/blocks/graphite1/files/my_htpasswd

@@ -0,0 +1 @@
+grafana:$apr1$4R/20xhC$8t37jPP5dbcLr48btdkU//

+ 70 - 0
docker/blocks/graphite1/files/nginx.conf

@@ -0,0 +1,70 @@
+daemon off;
+user www-data;
+worker_processes 1;
+pid /var/run/nginx.pid;
+
+events {
+  worker_connections 1024;
+}
+
+http {
+  sendfile on;
+  tcp_nopush on;
+  tcp_nodelay on;
+  keepalive_timeout 65;
+  types_hash_max_size 2048;
+  server_tokens off;
+
+  server_names_hash_bucket_size 32;
+
+  include /etc/nginx/mime.types;
+  default_type application/octet-stream;
+
+  access_log /var/log/nginx/access.log;
+  error_log /var/log/nginx/error.log;
+
+  gzip on;
+  gzip_disable "msie6";
+
+  server {
+    listen 80 default_server;
+    server_name _;
+
+    open_log_file_cache max=1000 inactive=20s min_uses=2 valid=1m;
+
+    location / {
+        proxy_pass                 http://127.0.0.1:8000;
+        proxy_set_header           X-Real-IP   $remote_addr;
+        proxy_set_header           X-Forwarded-For  $proxy_add_x_forwarded_for;
+        proxy_set_header           X-Forwarded-Proto  $scheme;
+        proxy_set_header           X-Forwarded-Server  $host;
+        proxy_set_header           X-Forwarded-Host  $host;
+        proxy_set_header           Host  $host;
+
+        client_max_body_size       10m;
+        client_body_buffer_size    128k;
+
+        proxy_connect_timeout      90;
+        proxy_send_timeout         90;
+        proxy_read_timeout         90;
+
+        proxy_buffer_size          4k;
+        proxy_buffers              4 32k;
+        proxy_busy_buffers_size    64k;
+        proxy_temp_file_write_size 64k;
+    }
+
+    add_header Access-Control-Allow-Origin "*";
+    add_header Access-Control-Allow-Methods "GET, OPTIONS";
+    add_header Access-Control-Allow-Headers "origin, authorization, accept";
+
+    location /content {
+      alias /opt/graphite/webapp/content;
+
+    }
+
+    location /media {
+      alias /usr/share/pyshared/django/contrib/admin/media;
+    }
+  }
+}

+ 8 - 0
docker/blocks/graphite1/files/statsd_config.js

@@ -0,0 +1,8 @@
+{
+  graphitePort: 2003,
+  graphiteHost: "127.0.0.1",
+  port: 8125,
+  mgmt_port: 8126,
+  backends: ['./backends/graphite'],
+  debug: true
+}

+ 19 - 0
docker/blocks/graphite1/files/storage-aggregation.conf

@@ -0,0 +1,19 @@
+[min]
+pattern = \.min$
+xFilesFactor = 0.1
+aggregationMethod = min
+
+[max]
+pattern = \.max$
+xFilesFactor = 0.1
+aggregationMethod = max
+
+[sum]
+pattern = \.count$
+xFilesFactor = 0
+aggregationMethod = sum
+
+[default_average]
+pattern = .*
+xFilesFactor = 0.5
+aggregationMethod = average

+ 16 - 0
docker/blocks/graphite1/files/storage-schemas.conf

@@ -0,0 +1,16 @@
+[carbon]
+pattern = ^carbon\..*
+retentions = 1m:31d,10m:1y,1h:5y
+
+[highres]
+pattern = ^highres.*
+retentions = 1s:1d,1m:7d
+
+[statsd]
+pattern = ^statsd.*
+retentions = 1m:7d,10m:1y
+
+[default]
+pattern = .*
+retentions = 10s:1d,1m:7d,10m:1y
+

+ 26 - 0
docker/blocks/graphite1/files/supervisord.conf

@@ -0,0 +1,26 @@
+[supervisord]
+nodaemon = true
+environment = GRAPHITE_STORAGE_DIR='/opt/graphite/storage',GRAPHITE_CONF_DIR='/opt/graphite/conf'
+
+[program:nginx]
+command = /usr/sbin/nginx
+stdout_logfile = /var/log/supervisor/%(program_name)s.log
+stderr_logfile = /var/log/supervisor/%(program_name)s.log
+autorestart = true
+
+[program:carbon-cache]
+;user = www-data
+command = /opt/graphite/bin/carbon-cache.py --debug start
+stdout_logfile = /var/log/supervisor/%(program_name)s.log
+stderr_logfile = /var/log/supervisor/%(program_name)s.log
+autorestart = true
+
+[program:graphite-webapp]
+;user = www-data
+directory = /opt/graphite/webapp
+environment = PYTHONPATH='/opt/graphite/webapp'
+command = /usr/bin/gunicorn_django -b127.0.0.1:8000 -w2 graphite/settings.py
+stdout_logfile = /var/log/supervisor/%(program_name)s.log
+stderr_logfile = /var/log/supervisor/%(program_name)s.log
+autorestart = true
+