commit 73d37f81a6f0ad2e76d3c9242d24599425ab8ad7 Author: Andrea Dell'Amico Date: Thu May 28 11:32:57 2015 +0200 Major refactoring. Moved all the library roles under 'library/roles' and changed all the occurrances inside all the playbooks. diff --git a/R/defaults/main.yml b/R/defaults/main.yml new file mode 100644 index 00000000..9a795501 --- /dev/null +++ b/R/defaults/main.yml @@ -0,0 +1,65 @@ +--- +# +# To list the installed R packages +# Run R, then execute +# packinfo <- installed.packages (fields = c ("Package", "Version")) +# packinfo[,c("Package", "Version")] +# +# The install/remove script has been taken from here: http://adamj.eu/tech/2014/07/19/installing-and-removing-r-packages-with-ansible/ +# + +r_install_cran_repo: False +#r_cran_mirror_site: http://cran.rstudio.com +r_cran_mirror_site: http://cran.mirror.garr.it/mirrors/CRAN/ +r_base_pkg_version: 2.14.1 +r_packages_state: present + +r_base_packages_list: + - r-base + - jags + +r_plugins_packages_list: + - r-cran-rjags + - r-cran-abind + - r-cran-boot + - r-cran-class + - r-cran-cluster + - r-cran-coda + - r-cran-codetools + - r-cran-foreign + - r-cran-lattice + - r-cran-maptools + - r-cran-mass + - r-cran-matrix + - r-cran-mgcv + - r-cran-nlme + - r-cran-nnet + - r-cran-rpart + - r-cran-sp + - r-cran-spatial + - r-cran-survival + +r_plugins_list_to_install: + - R2WinBUGS + - R2jags + - bayesmix + - coda + - rjags + - runjags + - base + - compiler + - datasets + - grDevices + - graphics + - grid + - methods + - parallel + - splines + - stats + - stats4 + - tcltk + - tools + - utils + +#r_plugins_list_to_remove: + diff --git a/R/tasks/main.yml b/R/tasks/main.yml new file mode 100644 index 00000000..a0cfb881 --- /dev/null +++ b/R/tasks/main.yml @@ -0,0 +1,59 @@ +--- +- name: Install the cran repository key + apt_key: id=E084DAB9 keyserver=keyserver.ubuntu.com state=present + register: update_apt_cache + when: r_install_cran_repo + tags: + - r_software + - r_repo + +- name: Install the cran repository 
definition + apt_repository: repo='deb http://cran.rstudio.com/bin/linux/ubuntu {{ ansible_distribution_release }}/' state=present + register: update_apt_cache + when: r_install_cran_repo + tags: + - r_software + - r_repo + +- name: Install the cran repository definition + apt_repository: repo='deb {{ r_cran_mirror_site }}/bin/linux/ubuntu {{ ansible_distribution_release }}/' state=absent + register: update_apt_cache + when: not r_install_cran_repo + tags: + - r_software + - r_repo + +- name: Update the apt cache if needed + apt: update_cache=yes + when: ( update_apt_cache | changed ) + tags: + - r_software + - r_repo + +- name: Install the R base packages + apt: pkg={{ item }} state={{ r_packages_state }} + with_items: r_base_packages_list + tags: + - r_software + - r_pkg + +- name: Install the R plugins from the ubuntu repo + apt: pkg={{ item }} state={{ r_packages_state }} + with_items: r_plugins_packages_list + tags: + - r_software + - r_pkg + +- name: Install R plugins from the cran binaries repo + command: > + Rscript --slave --no-save --no-restore-history -e "if (! ('{{ item }}' %in% installed.packages()[,'Package'])) { install.packages(pkgs='{{ item }}', repos=c('{{ r_cran_mirror_site }}/')); print('Added'); } else { print('Already installed'); }" + register: install_plugins_result + failed_when: "install_plugins_result.rc != 0 or 'had non-zero exit status' in install_plugins_result.stderr" + changed_when: "'Added' in install_plugins_result.stdout" + with_items: r_plugins_list_to_install + tags: + - r_software + - r_pkg + + + diff --git a/ansible_ppa/defaults/main.yml b/ansible_ppa/defaults/main.yml new file mode 100644 index 00000000..fef4bdc0 --- /dev/null +++ b/ansible_ppa/defaults/main.yml @@ -0,0 +1,12 @@ +--- +# ansible PKG state: latest, installed, absent +ansible_pkg_state: latest +ansible_cfg_file: /etc/ansible/ansible.cfg +# options: smart, implicit, explicit. 
+ansible_gathering: smart +ansible_command_warnings: True +ansible_control_path: '%(directory)s/%%h-%%r' +# +# Note: it breaks sudo if there's 'requiretty' inside the sudoers file +ansible_pipelining: True +ansible_scp_if_ssh: True diff --git a/ansible_ppa/tasks/ansible-config.yml b/ansible_ppa/tasks/ansible-config.yml new file mode 100644 index 00000000..146eb334 --- /dev/null +++ b/ansible_ppa/tasks/ansible-config.yml @@ -0,0 +1,32 @@ +--- +- name: Set the gather facts policy + action: configfile path={{ ansible_cfg_file }} key=gathering value='{{ ansible_gathering }}' + tags: + - ansible + - ansible_cfg + +- name: Warn if some shell commands can be avoided using modules + action: configfile path={{ ansible_cfg_file }} key=command_warning value='{{ ansible_command_warnings }}' + tags: + - ansible + - ansible_cfg + +- name: Shorten the ansible control path + action: configfile path={{ ansible_cfg_file }} key=control_path value='{{ ansible_control_path }}' + tags: + - ansible + - ansible_cfg + +- name: Be fast, use pipelining when possible + action: configfile path={{ ansible_cfg_file }} key=pipelining value='{{ ansible_pipelining }}' + tags: + - ansible + - ansible_cfg + +- name: Use scp instead of sftp to transfer files + action: configfile path={{ ansible_cfg_file }} key=scp_if_ssh value='{{ ansible_scp_if_ssh }}' + tags: + - ansible + - ansible_cfg + + diff --git a/ansible_ppa/tasks/ansible-packages.yml b/ansible_ppa/tasks/ansible-packages.yml new file mode 100644 index 00000000..26926f5d --- /dev/null +++ b/ansible_ppa/tasks/ansible-packages.yml @@ -0,0 +1,24 @@ +--- +- name: Remove the now obsolete rquillo ppa for ansible + apt_repository: repo='ppa:rquillo/ansible' state=absent + register: update_apt_cache_rquillo + tags: + - ansible + +- name: Add the ansible ppa for ansible + apt_repository: repo='ppa:ansible/ansible' + register: update_apt_cache + tags: + - ansible + +- name: Update the apt cache if needed + apt: update_cache=yes + when: 
(update_apt_cache|changed) or (update_apt_cache_rquillo|changed) + tags: + - ansible + +- name: Install the ansible package + apt: pkg=ansible state={{ ansible_pkg_state }} + tags: + - ansible + diff --git a/ansible_ppa/tasks/main.yml b/ansible_ppa/tasks/main.yml new file mode 100644 index 00000000..97007007 --- /dev/null +++ b/ansible_ppa/tasks/main.yml @@ -0,0 +1,4 @@ +--- +- include: ansible-packages.yml +- include: ansible-config.yml + \ No newline at end of file diff --git a/apache/defaults/main.yml b/apache/defaults/main.yml new file mode 100644 index 00000000..dec3dae5 --- /dev/null +++ b/apache/defaults/main.yml @@ -0,0 +1,32 @@ +--- +apache_user: www-data +apache_group: '{{ apache_user }}' + +apache_packages: + - apache2 + - apache2-mpm-prefork + - apache2-utils + - libapache2-mod-xsendfile + - unzip + - zip + +apache_default_modules: + - headers + - rewrite + - expires + - xsendfile + +apache_basic_auth: False +apache_basic_auth_single_file: True +apache_basic_auth_dir: /etc/apache2/auth +apache_basic_auth_file: '{{ apache_basic_auth_dir }}/htpasswd' + +apache_basic_auth_modules: + - auth_basic + - authn_file + - authz_user + +# Put them in a vault file. auth_file is optional. 
Not used when apache_basic_auth_single_file is true +# apache_basic_users: +# - { username:'', password:'', state:'present,absent', auth_file:'path_to_file' } + diff --git a/apache/handlers/main.yml b/apache/handlers/main.yml new file mode 100644 index 00000000..6991f5c3 --- /dev/null +++ b/apache/handlers/main.yml @@ -0,0 +1,4 @@ +--- +- name: apache2 reload + service: name=apache2 state=reloaded + diff --git a/apache/tasks/apache-basic-auth.yml b/apache/tasks/apache-basic-auth.yml new file mode 100644 index 00000000..fbabdad5 --- /dev/null +++ b/apache/tasks/apache-basic-auth.yml @@ -0,0 +1,36 @@ +--- +- name: Load the basic auth modules + apache2_module: name={{ item }} state=present + with_items: apache_basic_auth_modules + notify: apache2 reload + tags: + - apache + - apache_basic_auth + +- name: Create the authentication directory + file: path={{ apache_basic_auth_dir }} mode=0750 owner=root group={{ apache_group }} state=directory + tags: + - apache + - apache_basic_auth + +- name: Install the python-passlib library + apt: pkg=python-passlib state=present + tags: + - apache + - apache_basic_auth + +- name: Create the basic auth file + htpasswd: path={{ apache_basic_auth_file }} name={{ item.username }} password={{ item.password }} create=yes state={{ item.state }} + when: apache_basic_users is defined and apache_basic_auth_single_file + tags: + - apache + - apache_basic_auth + +- name: Create the basic auth file + htpasswd: path={{ item.auth_file }} name={{ item.username }} password={{ item.password }} create=yes state={{ item.state }} + with_items: apache_basic_users + when: apache_basic_users is defined and not apache_basic_auth_single_file + tags: + - apache + - apache_basic_auth + diff --git a/apache/tasks/apache.yml b/apache/tasks/apache.yml new file mode 100644 index 00000000..c2a9e981 --- /dev/null +++ b/apache/tasks/apache.yml @@ -0,0 +1,22 @@ +--- +- name: Install the apache packages + apt: pkg={{ item }} state=installed force=yes + with_items: 
apache_packages + tags: + - apache + +- name: Load the required modules + apache2_module: name={{ item }} state=present + with_items: apache_default_modules + notify: apache2 reload + tags: + - apache + +- name: Remove the default virtualhost file + file: dest=/etc/apache2/sites-enabled/{{ item }} state=absent + with_items: + - 000-default + - 000-default.conf + notify: apache2 reload + tags: + - apache diff --git a/apache/tasks/main.yml b/apache/tasks/main.yml new file mode 100644 index 00000000..2c9a3c64 --- /dev/null +++ b/apache/tasks/main.yml @@ -0,0 +1,4 @@ +--- +- include: apache.yml +- include: apache-basic-auth.yml + when: apache_basic_auth diff --git a/chkconfig/tasks/main.yml b/chkconfig/tasks/main.yml new file mode 100644 index 00000000..bb50b59d --- /dev/null +++ b/chkconfig/tasks/main.yml @@ -0,0 +1,15 @@ +--- +- name: install chkconfig and insserv + apt: pkg={{ item }} state=present + with_items: + - chkconfig + - insserv + tags: + - chkconfig + +# Workaround for a bug in the insserv package. insserv is needed by chkconfig +- name: Workaround for a bug in the insserv package. 
+ action: file src=/usr/lib/insserv/insserv dest=/sbin/insserv state=link + tags: + - chkconfig + diff --git a/deb-apt-setup/tasks/main.yml b/deb-apt-setup/tasks/main.yml new file mode 100644 index 00000000..885774fd --- /dev/null +++ b/deb-apt-setup/tasks/main.yml @@ -0,0 +1,18 @@ +--- +# First things first: install the basic requirements with a raw command +- name: install python-apt + raw: "apt-get update; apt-get install -y python python-apt lsb-release" + tags: + - pythonapt + +- name: Install python-software-properties + apt: pkg=python-software-properties state=installed + tags: + - pythonapt + +- name: Install software-properties-common on quantal distributions + apt: pkg=software-properties-common state=installed + when: is_quantal + tags: + - pythonapt + diff --git a/deb-set-locale/defaults/main.yml b/deb-set-locale/defaults/main.yml new file mode 100644 index 00000000..3593f0a4 --- /dev/null +++ b/deb-set-locale/defaults/main.yml @@ -0,0 +1,3 @@ +--- +deb_default_locale: "en_US.UTF-8" +deb_locales: "{{ deb_default_locale }} en_US, it_IT.UTF-8 it_IT" diff --git a/deb-set-locale/tasks/main.yml b/deb-set-locale/tasks/main.yml new file mode 100644 index 00000000..74835816 --- /dev/null +++ b/deb-set-locale/tasks/main.yml @@ -0,0 +1,12 @@ +--- +- name: Generate locales + debconf: name=locales question='locales/locales_to_be_generated' value='{{ deb_locales }}' vtype='multiselect' + tags: + - locale + + +- name: Update the locale default + debconf: name=locales question='locales/default_environment_locale' value='{{ deb_default_locale }}' vtype='select' + tags: + - locale + diff --git a/dnet-apt-repo/tasks/main.yml b/dnet-apt-repo/tasks/main.yml new file mode 100644 index 00000000..aaa6d34d --- /dev/null +++ b/dnet-apt-repo/tasks/main.yml @@ -0,0 +1,30 @@ +--- +- name: apt key for the internal ppa repository + apt_key: url=http://ppa.research-infrastructures.eu/system/keys/system-archive.asc state=present + when: has_apt + register: update_apt_cache + tags: + 
- dnet + +- name: Install the D-Net apt repositories + apt_repository: repo='{{ item }}' + with_items: + - deb http://ppa.research-infrastructures.eu/dnet lucid main + - deb http://ppa.research-infrastructures.eu/dnet unstable main + when: has_apt + register: update_apt_cache + tags: + - dnet + +- name: Install the D-NET repository key + action: apt_key url=http://ppa.research-infrastructures.eu/dnet/keys/dnet-archive.asc + tags: + - dnet + +- name: Update the apt cache + apt: update_cache=yes + when: update_apt_cache.changed + ignore_errors: True + tags: + - dnet + diff --git a/docker/handlers/main.yml b/docker/handlers/main.yml new file mode 100644 index 00000000..30b27fb6 --- /dev/null +++ b/docker/handlers/main.yml @@ -0,0 +1,3 @@ +--- +- name: Restart docker + service: name=docker state=restarted diff --git a/docker/tasks/main.yml b/docker/tasks/main.yml new file mode 100644 index 00000000..d452fd2a --- /dev/null +++ b/docker/tasks/main.yml @@ -0,0 +1,7 @@ +--- +- include: pkgs.yml + +- name: Enable Docker TCP on local bridge (for docker swarm) + action: configfile path=/etc/default/docker syntax=shell key=DOCKER_OPTS value="\"-H tcp://172.17.42.1:2375 -H unix:///var/run/docker.sock\"" + notify: Restart docker + tags: docker-conf diff --git a/docker/tasks/pkgs.yml b/docker/tasks/pkgs.yml new file mode 100644 index 00000000..381062ce --- /dev/null +++ b/docker/tasks/pkgs.yml @@ -0,0 +1,16 @@ +--- +- name: Add Docker repository key + apt_key: url="https://get.docker.io/gpg" + tags: + - docker + +- name: Add Docker repository + apt_repository: repo='deb http://get.docker.io/ubuntu docker main' update_cache=yes + tags: + - docker + +- name: Install Docker + apt: pkg=lxc-docker state=present + tags: + - docker + diff --git a/fail2ban/defaults/main.yml b/fail2ban/defaults/main.yml new file mode 100644 index 00000000..7b4c85d9 --- /dev/null +++ b/fail2ban/defaults/main.yml @@ -0,0 +1,33 @@ +--- + +# Fail2ban +# Needed by the fail2ban template +cm_ip: 146.48.123.18 
+monitoring_ip: 146.48.123.23 +# ban time in seconds. 86400 == 1 day +f2b_ban_time: 86400 +f2b_findtime: 600 +f2b_maxretry: 5 +f2b_default_backend: auto +f2b_usedns: warn +f2b_dest_email: sysadmin@research-infrastructures.eu +f2b_sender_email: denyhosts@research-infrastructures.eu +f2b_default_banaction: iptables-multiport +# Default action: ban. Not send email +f2b_default_action: action_ +f2b_default_iptableschain: INPUT +f2b_ssh_enabled: true +f2b_ssh_ddos_enabled: true +f2b_apache_auth_enabled: false +f2b_apache_noscript_enabled: false +f2b_apache_overflow_enabled: false +f2b_php_url_popen: false +f2b_nginx_auth_enabled: false +f2b_vsftpd_enabled: false +f2b_vsftpd_logpath: /var/log/vsftpd.log +f2b_recidive_enabled: true +# 604800: one week +f2b_recidive_findtime: 604800 +# 14515200 24 weeks +f2b_recidive_ban_time: 14515200 + diff --git a/fail2ban/handlers/main.yml b/fail2ban/handlers/main.yml new file mode 100644 index 00000000..cdd2d5e8 --- /dev/null +++ b/fail2ban/handlers/main.yml @@ -0,0 +1,4 @@ +--- +- name: Restart fail2ban + service: name=fail2ban state=restarted enabled=yes + diff --git a/fail2ban/meta/main.yml b/fail2ban/meta/main.yml new file mode 100644 index 00000000..efcb30ba --- /dev/null +++ b/fail2ban/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - role: '../../library/roles/iptables' diff --git a/fail2ban/tasks/fail2ban.yml b/fail2ban/tasks/fail2ban.yml new file mode 100644 index 00000000..7f485499 --- /dev/null +++ b/fail2ban/tasks/fail2ban.yml @@ -0,0 +1,14 @@ +--- +- name: install fail2ban ubuntu >= 14.04 + apt: pkg={{ item }} state=installed + with_items: + - fail2ban + tags: + - fail2ban + +- name: Install the fail2ban custom jail file + template: src=jail.local.j2 dest=/etc/fail2ban/jail.local owner=root group=root mode=444 + notify: Restart fail2ban + tags: + - fail2ban + diff --git a/fail2ban/tasks/main.yml b/fail2ban/tasks/main.yml new file mode 100644 index 00000000..47ddfef2 --- /dev/null +++ b/fail2ban/tasks/main.yml @@ -0,0 
+1,4 @@ +--- +- include: fail2ban.yml + when: is_trusty + diff --git a/fail2ban/templates/jail.local.j2 b/fail2ban/templates/jail.local.j2 new file mode 100644 index 00000000..6cf14ac0 --- /dev/null +++ b/fail2ban/templates/jail.local.j2 @@ -0,0 +1,254 @@ +# Fail2Ban configuration file. +# +# This file was composed for Debian systems from the original one +# provided now under /usr/share/doc/fail2ban/examples/jail.conf +# for additional examples. +# +# Comments: use '#' for comment lines and ';' for inline comments +# +# To avoid merges during upgrades DO NOT MODIFY THIS FILE +# and rather provide your changes in /etc/fail2ban/jail.local +# + +# The DEFAULT allows a global definition of the options. They can be overridden +# in each jail afterwards. + +[DEFAULT] + +# "ignoreip" can be an IP address, a CIDR mask or a DNS host. Fail2ban will not +# ban a host which matches an address in this list. Several addresses can be +# defined using space separator. +ignoreip = 127.0.0.1/8 {{ cm_ip }} {{ monitoring_ip }} + +# "bantime" is the number of seconds that a host is banned. +bantime = {{ f2b_ban_time }} + +# A host is banned if it has generated "maxretry" during the last "findtime" +# seconds. +findtime = {{ f2b_findtime }} +maxretry = {{ f2b_maxretry }} + +# "backend" specifies the backend used to get files modification. +# Available options are "pyinotify", "gamin", "polling" and "auto". +# This option can be overridden in each jail as well. +# +# pyinotify: requires pyinotify (a file alteration monitor) to be installed. +# If pyinotify is not installed, Fail2ban will use auto. +# gamin: requires Gamin (a file alteration monitor) to be installed. +# If Gamin is not installed, Fail2ban will use auto. +# polling: uses a polling algorithm which does not require external libraries. +# auto: will try to use the following backends, in order: +# pyinotify, gamin, polling. 
+backend = {{ f2b_default_backend }} + +# "usedns" specifies if jails should trust hostnames in logs, +# warn when reverse DNS lookups are performed, or ignore all hostnames in logs +# +# yes: if a hostname is encountered, a reverse DNS lookup will be performed. +# warn: if a hostname is encountered, a reverse DNS lookup will be performed, +# but it will be logged as a warning. +# no: if a hostname is encountered, will not be used for banning, +# but it will be logged as info. +usedns = {{ f2b_usedns }} + +# +# Destination email address used solely for the interpolations in +# jail.{conf,local} configuration files. +destemail = {{ f2b_dest_email }} + +# +# Name of the sender for mta actions +sendername = {{ f2b_sender_email }} + +# +# ACTIONS +# + +# Default banning action (e.g. iptables, iptables-new, +# iptables-multiport, shorewall, etc) It is used to define +# action_* variables. Can be overridden globally or per +# section within jail.local file +banaction = {{ f2b_default_banaction }} + +# email action. Since 0.8.1 upstream fail2ban uses sendmail +# MTA for the mailing. Change mta configuration parameter to mail +# if you want to revert to conventional 'mail'. +mta = sendmail + +# Default protocol +protocol = tcp + +# Specify chain where jumps would need to be added in iptables-* actions +chain = {{ f2b_default_iptableschain }} + +# +# Action shortcuts. To be used to define action parameter + +# The simplest action to take: ban only +action_ = %(banaction)s[name=%(__name__)s, port="%(port)s", protocol="%(protocol)s", chain="%(chain)s"] + +# ban & send an e-mail with whois report to the destemail. +action_mw = %(banaction)s[name=%(__name__)s, port="%(port)s", protocol="%(protocol)s", chain="%(chain)s"] + %(mta)s-whois[name=%(__name__)s, dest="%(destemail)s", protocol="%(protocol)s", chain="%(chain)s", sendername="%(sendername)s"] + +# ban & send an e-mail with whois report and relevant log lines +# to the destemail. 
+action_mwl = %(banaction)s[name=%(__name__)s, port="%(port)s", protocol="%(protocol)s", chain="%(chain)s"] + %(mta)s-whois-lines[name=%(__name__)s, dest="%(destemail)s", logpath=%(logpath)s, chain="%(chain)s", sendername="%(sendername)s"] + +# Choose default action. To change, just override value of 'action' with the +# interpolation to the chosen action shortcut (e.g. action_mw, action_mwl, etc) in jail.local +# globally (section [DEFAULT]) or per specific section +action = %({{ f2b_default_action }})s + +# +# JAILS +# + +# Next jails corresponds to the standard configuration in Fail2ban 0.6 which +# was shipped in Debian. Enable any defined here jail by including +# +# [SECTION_NAME] +# enabled = true + +# +# in /etc/fail2ban/jail.local. +# +# Optionally you may override any other parameter (e.g. banaction, +# action, port, logpath, etc) in that section within jail.local + +[ssh] + +enabled = {{ f2b_ssh_enabled }} +port = ssh +filter = sshd +logpath = /var/log/auth.log +maxretry = {{ f2b_maxretry }} + +[dropbear] + +enabled = false +port = ssh +filter = dropbear +logpath = /var/log/auth.log +maxretry = 6 + +# Generic filter for pam. 
Has to be used with action which bans all ports +# such as iptables-allports, shorewall +[pam-generic] + +enabled = false +# pam-generic filter can be customized to monitor specific subset of 'tty's +filter = pam-generic +# port actually must be irrelevant but lets leave it all for some possible uses +port = all +banaction = iptables-allports +port = anyport +logpath = /var/log/auth.log +maxretry = 6 + +[xinetd-fail] + +enabled = false +filter = xinetd-fail +port = all +banaction = iptables-multiport-log +logpath = /var/log/daemon.log +maxretry = 2 + + +[ssh-ddos] + +enabled = {{ f2b_ssh_ddos_enabled }} +port = ssh +filter = sshd-ddos +logpath = /var/log/auth.log +maxretry = {{ f2b_maxretry }} + + +# +# HTTP servers +# + +# default action is now multiport, so apache-multiport jail was left +# for compatibility with previous (<0.7.6-2) releases +[apache-multiport] + +enabled = {{ f2b_apache_auth_enabled }} +port = http,https +filter = apache-auth +logpath = /var/log/apache*/*error.log +maxretry = 6 + +[apache-noscript] + +enabled = {{ f2b_apache_noscript_enabled }} +port = http,https +filter = apache-noscript +logpath = /var/log/apache*/*error.log +maxretry = 6 + +[apache-overflows] + +enabled = {{ f2b_apache_overflow_enabled }} +port = http,https +filter = apache-overflows +logpath = /var/log/apache*/*error.log +maxretry = 2 + +# Ban attackers that try to use PHP's URL-fopen() functionality +# through GET/POST variables. - Experimental, with more than a year +# of usage in production environments. + +[php-url-fopen] + +enabled = {{ f2b_php_url_popen }} +port = http,https +filter = php-url-fopen +logpath = /var/www/*/logs/access_log + +# A simple PHP-fastcgi jail which works with lighttpd. 
+# If you run a lighttpd server, then you probably will +# find these kinds of messages in your error_log: +# ALERT – tried to register forbidden variable ‘GLOBALS’ +# through GET variables (attacker '1.2.3.4', file '/var/www/default/htdocs/index.php') + +[nginx-http-auth] + +enabled = {{ f2b_nginx_auth_enabled }} +filter = nginx-http-auth +port = http,https +logpath = /var/log/nginx/error.log + +# +# FTP servers +# + +[vsftpd] + +enabled = {{ f2b_vsftpd_enabled }} +port = ftp,ftp-data,ftps,ftps-data +filter = vsftpd +logpath = {{ f2b_vsftpd_logpath }} +# or overwrite it in jails.local to be +# logpath = /var/log/auth.log +# if you want to rely on PAM failed login attempts +# vsftpd's failregex should match both of those formats +maxretry = 6 + + +# Jail for more extended banning of persistent abusers +# !!! WARNING !!! +# Make sure that your loglevel specified in fail2ban.conf/.local +# is not at DEBUG level -- which might then cause fail2ban to fall into +# an infinite loop constantly feeding itself with non-informative lines +[recidive] + +enabled = {{ f2b_recidive_enabled }} +filter = recidive +logpath = /var/log/fail2ban.log +action = iptables-allports[name=recidive] + sendmail-whois-lines[name=recidive, logpath=/var/log/fail2ban.log] +bantime = {{ f2b_recidive_ban_time }} +findtime = {{ f2b_recidive_findtime }} +maxretry = 5 diff --git a/ganglia/defaults/main.yml b/ganglia/defaults/main.yml new file mode 100644 index 00000000..374af76e --- /dev/null +++ b/ganglia/defaults/main.yml @@ -0,0 +1,10 @@ +# These are for reference only. 
+# Define your own set of variables +# +ganglia_gmond_cluster: "CNR-ISTI NeMIS Cluster" +ganglia_gmond_cluster_port: 8649 +ganglia_gmond_mcast_addr: 239.2.11.71 +ganglia_gmetad_host: monitoring.research-infrastructures.eu +ganglia_gmond_send_metadata_interval: 60 +# Needed to build the correct firewall rules when jmxtrans is in use +ganglia_gmond_use_jmxtrans: False diff --git a/ganglia/files/modpython.conf b/ganglia/files/modpython.conf new file mode 100644 index 00000000..fad29a93 --- /dev/null +++ b/ganglia/files/modpython.conf @@ -0,0 +1,9 @@ +modules { + module { + name = "python_module" + path = "/usr/lib/ganglia/modpython.so" + params = "/usr/lib/ganglia/python_modules" + } +} + +include('/etc/ganglia/conf.d/*.pyconf') diff --git a/ganglia/handlers/main.yml b/ganglia/handlers/main.yml new file mode 100644 index 00000000..7f6dc7ea --- /dev/null +++ b/ganglia/handlers/main.yml @@ -0,0 +1,2 @@ +- name: Restart ganglia monitor + service: name=ganglia-monitor state=restarted diff --git a/ganglia/tasks/main.yml b/ganglia/tasks/main.yml new file mode 100644 index 00000000..b4873dd7 --- /dev/null +++ b/ganglia/tasks/main.yml @@ -0,0 +1,91 @@ +--- +- name: Install the ganglia client + apt: pkg={{ item }} state=installed + with_items: + - ganglia-monitor + tags: + - monitoring + - ganglia + +- name: Install the ganglia linux specific plugins. 
We need at least ubuntu trusty or debian 7 + apt: pkg={{ item }} state=installed force=yes + with_items: + - ganglia-modules-linux + - ganglia-monitor-python + notify: + Restart ganglia monitor + when: is_trusty_or_debian7 + tags: + - monitoring + - ganglia + +- name: Distribute the ganglia configuration file for Ubuntu >= 12.04 + template: src=gmond.j2 dest=/etc/ganglia/gmond.conf owner=root group=root mode=444 + when: is_not_ubuntu_less_than_precise + notify: + Restart ganglia monitor + tags: + - monitoring + - ganglia + +- name: Distribute the ganglia configuration file for Debian 7 + template: src=gmond.j2 dest=/etc/ganglia/gmond.conf owner=root group=root mode=444 + when: is_debian7 + notify: + Restart ganglia monitor + tags: + - monitoring + - ganglia + +- name: Distribute the ganglia configuration file for Ubuntu < 12.04 and >= 10.04 and Debian 6 + template: src=gmond-3.1.j2 dest=/etc/ganglia/gmond.conf owner=root group=root mode=444 + when: is_ubuntu_between_10_04_and_11_04_and_is_debian_6 + notify: + Restart ganglia monitor + tags: + - monitoring + - ganglia + +- name: Distribute the ganglia configuration file for Ubuntu < 10.04 and Debian 4 + template: src=gmond-2.5.j2 dest=/etc/gmond.conf owner=root group=root mode=444 + when: + - is_ubuntu_between_8_and_9_and_is_debian_4 + notify: + Restart ganglia monitor + tags: + - monitoring + - ganglia + +- name: Distribute the ganglia configuration on broken hardy 8.04.4 + template: src=gmond-2.5.j2 dest=/etc/gmond.conf owner=root group=root mode=444 + when: + - is_broken_hardy_lts + notify: + Restart ganglia monitor + tags: + - monitoring + - ganglia + +- name: Setup the ganglia directory for python modules + file: dest=/usr/lib/ganglia/python_modules state=directory + when: is_precise + tags: + - ganglia + - monitoring + +- name: Ensure that the ganglia include conf dir exists + file: path=/etc/ganglia/conf.d state=directory + when: is_precise + tags: + - ganglia + - monitoring + +- name: Setup the ganglia 
configuration for python modules + copy: src=modpython.conf dest=/etc/ganglia/conf.d/modpython.conf owner=root group=root mode=0644 + notify: + - Restart ganglia monitor + when: is_precise + tags: + - monitoring + - ganglia + diff --git a/ganglia/templates/gmond-2.5.j2 b/ganglia/templates/gmond-2.5.j2 new file mode 100644 index 00000000..75a251b8 --- /dev/null +++ b/ganglia/templates/gmond-2.5.j2 @@ -0,0 +1,121 @@ +# $Id: gmond.conf,v 1.3 2004/01/20 19:15:23 sacerdoti Exp $ +# This is the configuration file for the Ganglia Monitor Daemon (gmond) +# Documentation can be found at http://ganglia.sourceforge.net/docs/ +# +# To change a value from it's default simply uncomment the line +# and alter the value +##################### +# +# The name of the cluster this node is a part of +# default: "unspecified" +name "{{ ganglia_gmond_cluster }}" +# +# The owner of this cluster. Represents an administrative +# domain. The pair name/owner should be unique for all clusters +# in the world. +# default: "unspecified" +owner "{{ ganglia_gmond_cluster_owner }}" +# +# The latitude and longitude GPS coordinates of this cluster on earth. +# Specified to 1 mile accuracy with two decimal places per axis in Decimal +# DMS format: "N61.18 W130.50". +# default: "unspecified" +# latlong "N32.87 W117.22" +# +# The URL for more information on the Cluster. Intended to give purpose, +# owner, administration, and account details for this cluster. +# default: "unspecified" +# url "http://www.mycluster.edu/" +# +# The location of this host in the cluster. Given as a 3D coordinate: +# "Rack,Rank,Plane" that corresponds to a Euclidean coordinate "x,y,z". 
+# default: "unspecified" +location "{{ ganglia_gmond_location }}" +# +# The multicast channel for gmond to send/receive data on +# default: 239.2.11.71 +mcast_channel {{ ganglia_gmond_mcast_addr }} +# +# The multicast port for gmond to send/receive data on +# default: 8649 +mcast_port {{ ganglia_gmond_cluster_port }} +# +# The multicast interface for gmond to send/receive data on +# default: the kernel decides based on routing configuration +# mcast_if eth1 +# +# The multicast Time-To-Live (TTL) for outgoing messages +# default: 1 +# mcast_ttl 1 +# +# The number of threads listening to multicast traffic +# default: 2 +# mcast_threads 2 +# +# Which port should gmond listen for XML requests on +# default: 8649 +xml_port {{ ganglia_gmond_cluster_port }} +# +# The number of threads answering XML requests +# default: 2 +# xml_threads 2 +# +# Hosts ASIDE from "127.0.0.1"/localhost and those multicasting +# on the same multicast channel which you will share your XML +# data with. Multiple hosts are allowed on multiple lines. +# Can be specified with either hostnames or IP addresses. +# default: none +# trusted_hosts 1.1.1.1 1.1.1.2 1.1.1.3 \ +# 2.3.2.3 3.4.3.4 5.6.5.6 +trusted_hosts {{ ganglia_gmetad_host }} +# +# The number of nodes in your cluster. This value is used in the +# creation of the cluster hash. +# default: 1024 +# num_nodes 1024 +# +# The number of custom metrics this gmond will be storing. This +# value is used in the creation of the host custom_metrics hash. +# default: 16 +# num_custom_metrics 16 +# +# Run gmond in "mute" mode. Gmond will only listen to the multicast +# channel but will not send any data on the channel. +# default: off +mute off +# +# Run gmond in "deaf" mode. Gmond will only send data on the multicast +# channel but will not listen/store any data from the channel. +# default: off +deaf off +# +# Run gmond in "debug" mode. Gmond will not background. Debug messages +# are sent to stdout. Value from 0-100. 
The higher the number the more +# detailed debugging information will be sent. +# default: 0 +# debug_level 10 +# +# If you don't want gmond to setuid, set this to "on" +# default: off +# no_setuid on +# +# Which user should gmond run as? +# default: nobody +setuid ganglia +# +# If you do not want this host to appear in the gexec host list, set +# this value to "on" +# default: off +# no_gexec on +# +# If you want any host which connects to the gmond XML to receive +# data, then set this value to "on" +# default: off +all_trusted on +# +# If you want dead nodes to "time out", enter a nonzero value here. If specified, +# a host will be removed from our state if we have not heard from it in this +# number of seconds. +# default: 0 (immortal) +# host_dmax 108000 + diff --git a/ganglia/templates/gmond-3.1.j2 b/ganglia/templates/gmond-3.1.j2 new file mode 100644 index 00000000..30cb3b29 --- /dev/null +++ b/ganglia/templates/gmond-3.1.j2 @@ -0,0 +1,342 @@ +/* This configuration is as close to 2.5.x default behavior as possible + The values closely match ./gmond/metric.h definitions in 2.5.x */ +globals { + daemonize = yes + setuid = yes + user = ganglia + debug_level = 0 +# max_udp_msg_len = 1472 + mute = no + deaf = no + host_dmax = 3600 /*secs */ + cleanup_threshold = 300 /*secs */ + gexec = no + allow_extra_data = yes + send_metadata_interval = 60 +} + +/* If a cluster attribute is specified, then all gmond hosts are wrapped inside + * of a tag. If you do not specify a cluster tag, then all will + * NOT be wrapped inside of a tag. */ +cluster { + name = "{{ ganglia_gmond_cluster }}" + owner = "{{ ganglia_gmond_cluster_owner }}" + latlong = "unspecified" + url = "unspecified" +} + +/* The host section describes attributes of the host, like the location */ +host { + location = "{{ ganglia_gmond_location }}" +} + +/* Feel free to specify as many udp_send_channels as you like. 
Gmond + used to only support having a single channel */ +udp_send_channel { + mcast_join = {{ ganglia_gmond_mcast_addr }} + port = {{ ganglia_gmond_cluster_port }} + ttl = 1 +} + +/* You can specify as many udp_recv_channels as you like as well. */ +udp_recv_channel { + mcast_join = {{ ganglia_gmond_mcast_addr }} + port = {{ ganglia_gmond_cluster_port }} +} + +udp_recv_channel { + bind = {{ ansible_fqdn }} + port = {{ ganglia_gmond_cluster_port }} +} + +/* You can specify as many tcp_accept_channels as you like to share + an xml description of the state of the cluster */ +tcp_accept_channel { + port = {{ ganglia_gmond_cluster_port }} +} + +/* Each metrics module that is referenced by gmond must be specified and + loaded. If the module has been statically linked with gmond, it does not + require a load path. However all dynamically loadable modules must include + a load path. */ +modules { + module { + name = "core_metrics" + } + module { + name = "cpu_module" + path = "/usr/lib/ganglia/modcpu.so" + } + module { + name = "disk_module" + path = "/usr/lib/ganglia/moddisk.so" + } + module { + name = "load_module" + path = "/usr/lib/ganglia/modload.so" + } + module { + name = "mem_module" + path = "/usr/lib/ganglia/modmem.so" + } + module { + name = "net_module" + path = "/usr/lib/ganglia/modnet.so" + } + module { + name = "proc_module" + path = "/usr/lib/ganglia/modproc.so" + } + module { + name = "sys_module" + path = "/usr/lib/ganglia/modsys.so" + } +} + +include ('/etc/ganglia/conf.d/*.conf') + + +/* The old internal 2.5.x metric array has been replaced by the following + collection_group directives. What follows is the default behavior for + collecting and sending metrics that is as close to 2.5.x behavior as + possible. */ + +/* This collection group will cause a heartbeat (or beacon) to be sent every + 20 seconds. In the heartbeat is the GMOND_STARTED data which expresses + the age of the running gmond. 
*/ +collection_group { + collect_once = yes + time_threshold = 20 + metric { + name = "heartbeat" + } +} + +/* This collection group will send general info about this host every 1200 secs. + This information doesn't change between reboots and is only collected once. */ +collection_group { + collect_once = yes + time_threshold = 1200 + metric { + name = "cpu_num" + title = "CPU Count" + } + metric { + name = "cpu_speed" + title = "CPU Speed" + } + metric { + name = "mem_total" + title = "Memory Total" + } + /* Should this be here? Swap can be added/removed between reboots. */ + metric { + name = "swap_total" + title = "Swap Space Total" + } + metric { + name = "boottime" + title = "Last Boot Time" + } + metric { + name = "machine_type" + title = "Machine Type" + } + metric { + name = "os_name" + title = "Operating System" + } + metric { + name = "os_release" + title = "Operating System Release" + } + metric { + name = "location" + title = "Location" + } +} + +/* This collection group will send the status of gexecd for this host every 300 secs */ +/* Unlike 2.5.x the default behavior is to report gexecd OFF. */ +collection_group { + collect_once = yes + time_threshold = 300 + metric { + name = "gexec" + title = "Gexec Status" + } +} + +/* This collection group will collect the CPU status info every 20 secs. + The time threshold is set to 90 seconds. In honesty, this time_threshold could be + set significantly higher to reduce unneccessary network chatter. 
*/ +collection_group { + collect_every = 20 + time_threshold = 180 + /* CPU status */ + metric { + name = "cpu_user" + value_threshold = "1.0" + title = "CPU User" + } + metric { + name = "cpu_system" + value_threshold = "1.0" + title = "CPU System" + } + metric { + name = "cpu_idle" + value_threshold = "5.0" + title = "CPU Idle" + } + metric { + name = "cpu_nice" + value_threshold = "1.0" + title = "CPU Nice" + } + metric { + name = "cpu_aidle" + value_threshold = "5.0" + title = "CPU aidle" + } + metric { + name = "cpu_wio" + value_threshold = "1.0" + title = "CPU wio" + } + /* The next two metrics are optional if you want more detail... + ... since they are accounted for in cpu_system. + metric { + name = "cpu_intr" + value_threshold = "1.0" + title = "CPU intr" + } + metric { + name = "cpu_sintr" + value_threshold = "1.0" + title = "CPU sintr" + } + */ +} + +collection_group { + collect_every = 20 + time_threshold = 90 + /* Load Averages */ + metric { + name = "load_one" + value_threshold = "1.0" + title = "One Minute Load Average" + } + metric { + name = "load_five" + value_threshold = "1.0" + title = "Five Minute Load Average" + } + metric { + name = "load_fifteen" + value_threshold = "1.0" + title = "Fifteen Minute Load Average" + } +} + +/* This group collects the number of running and total processes */ +collection_group { + collect_every = 80 + time_threshold = 950 + metric { + name = "proc_run" + value_threshold = "1.0" + title = "Total Running Processes" + } + metric { + name = "proc_total" + value_threshold = "1.0" + title = "Total Processes" + } +} + +/* This collection group grabs the volatile memory metrics every 40 secs and + sends them at least every 180 secs. This time_threshold can be increased + significantly to reduce unneeded network traffic. 
*/ +collection_group { + collect_every = 40 + time_threshold = 180 + metric { + name = "mem_free" + value_threshold = "1024.0" + title = "Free Memory" + } + metric { + name = "mem_shared" + value_threshold = "1024.0" + title = "Shared Memory" + } + metric { + name = "mem_buffers" + value_threshold = "1024.0" + title = "Memory Buffers" + } + metric { + name = "mem_cached" + value_threshold = "1024.0" + title = "Cached Memory" + } + metric { + name = "swap_free" + value_threshold = "1024.0" + title = "Free Swap Space" + } +} + +collection_group { + collect_every = 40 + time_threshold = 300 + metric { + name = "bytes_out" + value_threshold = 4096 + title = "Bytes Sent" + } + metric { + name = "bytes_in" + value_threshold = 4096 + title = "Bytes Received" + } + metric { + name = "pkts_in" + value_threshold = 256 + title = "Packets Received" + } + metric { + name = "pkts_out" + value_threshold = 256 + title = "Packets Sent" + } +} + +/* Different than 2.5.x default since the old config made no sense */ +collection_group { + collect_every = 1800 + time_threshold = 3600 + metric { + name = "disk_total" + value_threshold = 1.0 + title = "Total Disk Space" + } +} + +collection_group { + collect_every = 40 + time_threshold = 180 + metric { + name = "disk_free" + value_threshold = 1.0 + title = "Disk Space Available" + } + metric { + name = "part_max_used" + value_threshold = 1.0 + title = "Maximum Disk Space Used" + } +} + diff --git a/ganglia/templates/gmond.j2 b/ganglia/templates/gmond.j2 new file mode 100644 index 00000000..dc2ef39c --- /dev/null +++ b/ganglia/templates/gmond.j2 @@ -0,0 +1,343 @@ +/* This configuration is as close to 2.5.x default behavior as possible + The values closely match ./gmond/metric.h definitions in 2.5.x */ +globals { + daemonize = yes + setuid = yes + user = ganglia + debug_level = 0 +# max_udp_msg_len = 1472 + mute = no + deaf = no + host_dmax = 3600 /*secs */ + cleanup_threshold = 300 /*secs */ + gexec = no + allow_extra_data = yes + 
send_metadata_interval = {{ ganglia_gmond_send_metadata_interval }} +} + +/* If a cluster attribute is specified, then all gmond hosts are wrapped inside + * of a tag. If you do not specify a cluster tag, then all will + * NOT be wrapped inside of a tag. */ +cluster { + name = "{{ ganglia_gmond_cluster }}" + owner = "{{ ganglia_gmond_cluster_owner }}" + latlong = "unspecified" + url = "unspecified" +} + +/* The host section describes attributes of the host, like the location */ +host { + location = "{{ ganglia_gmond_location }}" +} + +/* Feel free to specify as many udp_send_channels as you like. Gmond + used to only support having a single channel */ +udp_send_channel { + bind_hostname = yes + mcast_join = {{ ganglia_gmond_mcast_addr }} + port = {{ ganglia_gmond_cluster_port }} + ttl = 1 +} + +/* You can specify as many udp_recv_channels as you like as well. */ +udp_recv_channel { + mcast_join = {{ ganglia_gmond_mcast_addr }} + port = {{ ganglia_gmond_cluster_port }} +} + +udp_recv_channel { + bind = {{ ansible_fqdn }} + port = {{ ganglia_gmond_cluster_port }} +} + +/* You can specify as many tcp_accept_channels as you like to share + an xml description of the state of the cluster */ +tcp_accept_channel { + port = {{ ganglia_gmond_cluster_port }} +} + +/* Each metrics module that is referenced by gmond must be specified and + loaded. If the module has been statically linked with gmond, it does not + require a load path. However all dynamically loadable modules must include + a load path. 
*/ +modules { + module { + name = "core_metrics" + } + module { + name = "cpu_module" + path = "/usr/lib/ganglia/modcpu.so" + } + module { + name = "disk_module" + path = "/usr/lib/ganglia/moddisk.so" + } + module { + name = "load_module" + path = "/usr/lib/ganglia/modload.so" + } + module { + name = "mem_module" + path = "/usr/lib/ganglia/modmem.so" + } + module { + name = "net_module" + path = "/usr/lib/ganglia/modnet.so" + } + module { + name = "proc_module" + path = "/usr/lib/ganglia/modproc.so" + } + module { + name = "sys_module" + path = "/usr/lib/ganglia/modsys.so" + } +} + +include ('/etc/ganglia/conf.d/*.conf') + + +/* The old internal 2.5.x metric array has been replaced by the following + collection_group directives. What follows is the default behavior for + collecting and sending metrics that is as close to 2.5.x behavior as + possible. */ + +/* This collection group will cause a heartbeat (or beacon) to be sent every + 20 seconds. In the heartbeat is the GMOND_STARTED data which expresses + the age of the running gmond. */ +collection_group { + collect_once = yes + time_threshold = 20 + metric { + name = "heartbeat" + } +} + +/* This collection group will send general info about this host every 1200 secs. + This information doesn't change between reboots and is only collected once. */ +collection_group { + collect_once = yes + time_threshold = 1200 + metric { + name = "cpu_num" + title = "CPU Count" + } + metric { + name = "cpu_speed" + title = "CPU Speed" + } + metric { + name = "mem_total" + title = "Memory Total" + } + /* Should this be here? Swap can be added/removed between reboots. 
*/ + metric { + name = "swap_total" + title = "Swap Space Total" + } + metric { + name = "boottime" + title = "Last Boot Time" + } + metric { + name = "machine_type" + title = "Machine Type" + } + metric { + name = "os_name" + title = "Operating System" + } + metric { + name = "os_release" + title = "Operating System Release" + } + metric { + name = "location" + title = "Location" + } +} + +/* This collection group will send the status of gexecd for this host every 300 secs */ +/* Unlike 2.5.x the default behavior is to report gexecd OFF. */ +collection_group { + collect_once = yes + time_threshold = 300 + metric { + name = "gexec" + title = "Gexec Status" + } +} + +/* This collection group will collect the CPU status info every 20 secs. + The time threshold is set to 90 seconds. In honesty, this time_threshold could be + set significantly higher to reduce unneccessary network chatter. */ +collection_group { + collect_every = 20 + time_threshold = 180 + /* CPU status */ + metric { + name = "cpu_user" + value_threshold = "1.0" + title = "CPU User" + } + metric { + name = "cpu_system" + value_threshold = "1.0" + title = "CPU System" + } + metric { + name = "cpu_idle" + value_threshold = "5.0" + title = "CPU Idle" + } + metric { + name = "cpu_nice" + value_threshold = "1.0" + title = "CPU Nice" + } + metric { + name = "cpu_aidle" + value_threshold = "5.0" + title = "CPU aidle" + } + metric { + name = "cpu_wio" + value_threshold = "1.0" + title = "CPU wio" + } + /* The next two metrics are optional if you want more detail... + ... since they are accounted for in cpu_system. 
+ metric { + name = "cpu_intr" + value_threshold = "1.0" + title = "CPU intr" + } + metric { + name = "cpu_sintr" + value_threshold = "1.0" + title = "CPU sintr" + } + */ +} + +collection_group { + collect_every = 20 + time_threshold = 90 + /* Load Averages */ + metric { + name = "load_one" + value_threshold = "1.0" + title = "One Minute Load Average" + } + metric { + name = "load_five" + value_threshold = "1.0" + title = "Five Minute Load Average" + } + metric { + name = "load_fifteen" + value_threshold = "1.0" + title = "Fifteen Minute Load Average" + } +} + +/* This group collects the number of running and total processes */ +collection_group { + collect_every = 80 + time_threshold = 950 + metric { + name = "proc_run" + value_threshold = "1.0" + title = "Total Running Processes" + } + metric { + name = "proc_total" + value_threshold = "1.0" + title = "Total Processes" + } +} + +/* This collection group grabs the volatile memory metrics every 40 secs and + sends them at least every 180 secs. This time_threshold can be increased + significantly to reduce unneeded network traffic. 
*/ +collection_group { + collect_every = 40 + time_threshold = 180 + metric { + name = "mem_free" + value_threshold = "1024.0" + title = "Free Memory" + } + metric { + name = "mem_shared" + value_threshold = "1024.0" + title = "Shared Memory" + } + metric { + name = "mem_buffers" + value_threshold = "1024.0" + title = "Memory Buffers" + } + metric { + name = "mem_cached" + value_threshold = "1024.0" + title = "Cached Memory" + } + metric { + name = "swap_free" + value_threshold = "1024.0" + title = "Free Swap Space" + } +} + +collection_group { + collect_every = 40 + time_threshold = 300 + metric { + name = "bytes_out" + value_threshold = 4096 + title = "Bytes Sent" + } + metric { + name = "bytes_in" + value_threshold = 4096 + title = "Bytes Received" + } + metric { + name = "pkts_in" + value_threshold = 256 + title = "Packets Received" + } + metric { + name = "pkts_out" + value_threshold = 256 + title = "Packets Sent" + } +} + +/* Different than 2.5.x default since the old config made no sense */ +collection_group { + collect_every = 1800 + time_threshold = 3600 + metric { + name = "disk_total" + value_threshold = 1.0 + title = "Total Disk Space" + } +} + +collection_group { + collect_every = 40 + time_threshold = 180 + metric { + name = "disk_free" + value_threshold = 1.0 + title = "Disk Space Available" + } + metric { + name = "part_max_used" + value_threshold = 1.0 + title = "Maximum Disk Space Used" + } +} + diff --git a/haproxy/defaults/main.yml b/haproxy/defaults/main.yml new file mode 100644 index 00000000..f5ac062d --- /dev/null +++ b/haproxy/defaults/main.yml @@ -0,0 +1,8 @@ +--- +haproxy_latest_release: False +haproxy_version: 1.5 +haproxy_latest_repo: "deb http://haproxy.debian.net {{ ansible_distribution }}-backports-{{ haproxy_version }} main" +haproxy_pkg_state: latest + +haproxy_default_port: 80 +haproxy_terminate_tls: False diff --git a/haproxy/tasks/main.yml b/haproxy/tasks/main.yml new file mode 100644 index 00000000..83841985 --- /dev/null +++ 
b/haproxy/tasks/main.yml @@ -0,0 +1,27 @@ +--- +- name: Get the haproxy repo key + apt_key: url=http://haproxy.debian.net/bernat.debian.org.gpg state=present + when: haproxy_latest_release + register: haproxy_repo + tags: haproxy + +- name: Define the haproxy repository + apt_repository: repo='{{ haproxy_latest_repo }}' state=present + when: haproxy_latest_release + register: haproxy_repo + tags: haproxy + +- name: Update the apt cache if needed + apt: update_cache=yes + when: ( haproxy_repo | changed ) + tags: haproxy + +- name: Install the haproxy package + apt: name=haproxy state=latest default_release={{ ansible_distribution}}-backports + when: not haproxy_latest_release + tags: haproxy + +- name: Install the haproxy package + apt: name=haproxy state=latest default_release={{ ansible_distribution}}-backports-{{ haproxy_version }} + when: haproxy_latest_release + tags: haproxy diff --git a/iptables/defaults/main.yml b/iptables/defaults/main.yml new file mode 100644 index 00000000..2bda3e68 --- /dev/null +++ b/iptables/defaults/main.yml @@ -0,0 +1,43 @@ +--- +# +# Reference only.
Check the iptables-rules.v4.j2 for the list of accepted variables +# +#pg_allowed_hosts: +# - 146.48.123.17/32 +# - 146.48.122.110/32 +# +#munin_server: +# - 146.48.122.15 +# - 146.48.87.88 +#http_port: 80 +#http_allowed_hosts: +# - 1.2.3.4/24 +#https_port: 443 +#https_allowed_hosts: +# - 0.0.0.0/0 +# +# Generic tcp and udp access +# iptables: +# tcp_rules: True +# tcp: +# - { port: '8080', allowed_hosts: [ '{{ network.isti }}', '{{ network.nmis }}', '{{ network.eduroam }}' ] } +# - { port: '80', allowed_hosts: [ '{{ network.isti }}', '{{ network.nmis }}', '{{ network.eduroam }}' ] } +# - { port: '80' } +# udp_rules: True +# udp: +# - { port: '123', allowed_hosts: [ '{{ network.isti }}', '{{ network.nmis }}', '{{ network.eduroam }}' ] } + +# munin_server: +# - 146.48.122.15 +# - 146.48.87.88 + +#nagios_monitoring_server_ip: 146.48.123.23 +#mongodb: +# start_server: 'yes' +# tcp_port: 27017 +# allowed_hosts: +# - 146.48.123.100/32 + +#iptables_default_policy: REJECT +iptables_default_policy: ACCEPT +iptables_open_all_to_isti_nets: False diff --git a/iptables/handlers/main.yml b/iptables/handlers/main.yml new file mode 100644 index 00000000..aff26b18 --- /dev/null +++ b/iptables/handlers/main.yml @@ -0,0 +1,22 @@ +--- +- name: Start the iptables service + service: name=iptables-persistent state=started + notify: Restart fail2ban + +- name: Flush the iptables rules + command: /etc/init.d/iptables-persistent flush + ignore_errors: true + +- name: Start the iptables service on Ubuntu < 12.04 + command: /etc/init.d/iptables-persistent start + ignore_errors: true + +- name: Stop the iptables service on Ubuntu < 12.04 + command: /etc/init.d/iptables-persistent stop + ignore_errors: true + +- name: Restart fail2ban + service: name=fail2ban state=restarted enabled=yes + when: is_trusty + + diff --git a/iptables/tasks/main.yml b/iptables/tasks/main.yml new file mode 100644 index 00000000..da67fa44 --- /dev/null +++ b/iptables/tasks/main.yml @@ -0,0 +1,32 @@ +--- +- name: 
Install the needed iptables packages + apt: pkg={{ item }} state=installed + with_items: + - iptables + - iptables-persistent + tags: + - iptables + +- name: Install the IPv4 rules with a different name. Needed by Ubuntu < 12.04 + template: src=iptables-{{ item }}.j2 dest=/etc/iptables/rules owner=root group=root mode=0640 + with_items: + - rules.v4 + when: is_ubuntu_between_10_04_and_11_04_and_is_debian_6 + notify: + - Start the iptables service on Ubuntu < 12.04 + tags: + - iptables + - iptables_rules + +- name: Install the IPv4 and IPv6 iptables rules. The IPv6 ones are not used + template: src=iptables-{{ item }}.j2 dest=/etc/iptables/{{ item }} owner=root group=root mode=0640 + with_items: + - rules.v4 + - rules.v6 + when: is_not_ubuntu_less_than_precise + notify: + - Start the iptables service + tags: + - iptables + - iptables_rules + diff --git a/iptables/templates/iptables-rules.v4.j2 b/iptables/templates/iptables-rules.v4.j2 new file mode 100644 index 00000000..e3a396f7 --- /dev/null +++ b/iptables/templates/iptables-rules.v4.j2 @@ -0,0 +1,182 @@ +# +# don't manually modify this file +# +*filter +:INPUT ACCEPT [0:0] +:FORWARD ACCEPT [0:0] +:OUTPUT ACCEPT [0:0] +-A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT +-A INPUT -p icmp -j ACCEPT +-A INPUT -i lo -j ACCEPT +# +{% if iptables_managed_ssh is defined and iptables_managed_ssh %} +{% if iptables_ssh_allowed_hosts is defined %} +# ssh is not open to all, even if we use denyhosts to prevent unauthorized accesses +{% for ip in iptables_ssh_allowed_hosts %} +-A INPUT -m state --state NEW -m tcp -p tcp -s {{ ip }} --dport 22 -j ACCEPT +{% endfor %} +{% endif %} +{% else %} +# ssh is always open.
We use denyhosts to prevent unauthorized accesses +-A INPUT -m state --state NEW -m tcp -p tcp --dport 22 -j ACCEPT +{% endif %} +{% if iptables_open_all_to_isti_nets %} +# Permit all traffic from our networks +-A INPUT -s {{ network.isti }} -j ACCEPT +-A INPUT -s {{ network.nmis }} -j ACCEPT +-A INPUT -s {{ network.eduroam }} -j ACCEPT +{% endif %} +{% if http_port is defined %} +# http +{% if http_allowed_hosts is defined %} +{% for ip in http_allowed_hosts %} +-A INPUT -m state --state NEW -s {{ ip }} -p tcp -m tcp --dport {{ http_port }} -j ACCEPT +{% endfor %} +{% else %} +-A INPUT -m state --state NEW -m tcp -p tcp --dport {{ http_port }} -j ACCEPT +{% endif %} +{% endif %} + +{% if https_port is defined %} +# https +{% if https_allowed_hosts is defined %} +{% for ip in https_allowed_hosts %} +-A INPUT -m state --state NEW -s {{ ip }} -p tcp -m tcp --dport {{ https_port }} -j ACCEPT +{% endfor %} +{% else %} +-A INPUT -m state --state NEW -m tcp -p tcp --dport {{ https_port }} -j ACCEPT +{% endif %} +{% endif %} + +{% if psql_db_port is defined %} +{% if psql_listen_on_ext_int %} +# postgresql clients +{% for db in psql_db_data %} +{% for ip in db.allowed_hosts %} +-A INPUT -m state --state NEW -s {{ ip }} -p tcp -m tcp --dport {{ psql_db_port }} -j ACCEPT +{% endfor %} +{% endfor %} +{% endif %} +-A INPUT -p tcp -m tcp --dport {{ psql_db_port }} -j DROP +{% endif %} + +{% if mongodb_allowed_hosts is defined %} +# mongodb clients +{% for ip in mongodb_allowed_hosts %} +-A INPUT -m state --state NEW -s {{ ip }} -p tcp -m tcp --dport {{ mongodb_tcp_port }} -j ACCEPT +{% endfor %} +-A INPUT -p tcp -m tcp --dport {{ mongodb_tcp_port }} -j DROP +{% endif %} + +{% if dnet_ports is defined %} +# dnet services +{% for tcp_port in dnet_ports %} +-A INPUT -m state --state NEW -p tcp -m tcp --dport {{ tcp_port }} -j ACCEPT +{% endfor %} +{% endif %} + +{% if dnet_jmx_ports is defined %} +# dnet jmx ports. 
Open to the isti networks only +{% for tcp_port in dnet_jmx_ports %} +-A INPUT -m state --state NEW -p tcp -m tcp -s {{ network.isti }} --dport {{ tcp_port }} -j ACCEPT +-A INPUT -m state --state NEW -p tcp -m tcp -s {{ network.nmis }} --dport {{ tcp_port }} -j ACCEPT +-A INPUT -m state --state NEW -p tcp -m tcp -s {{ network.eduroam }} --dport {{ tcp_port }} -j ACCEPT +{% endfor %} +{% endif %} + +{% if vsftpd_iptables_rules is defined and vsftpd_iptables_rules %} +# Someone still uses ftp +{% if vsftpd_iptables_allowed_hosts is defined and vsftpd_iptables_allowed_hosts %} +{% for ip in vsftpd_iptables_allowed_hosts %} +-A INPUT -m state --state NEW -m tcp -p tcp -s {{ ip }} --dport ftp -j ACCEPT +-A INPUT -m state --state NEW,RELATED -m tcp -p tcp -s {{ ip }} --dport {{ vsftpd_pasv_min_port }}:{{ vsftpd_pasv_max_port }} -j ACCEPT +{% endfor %} +-A INPUT -m helper --helper ftp -j ACCEPT +{% endif %} +{% endif %} + +{% if nagios_monitoring_server_ip is defined %} +# Nagios NRPE +-A INPUT -m state --state NEW -s {{ nagios_monitoring_server_ip }} -p tcp -m tcp --dport 5666 -j ACCEPT +-A INPUT -s {{ nagios_monitoring_server_ip }} -p udp -m udp --dport 123 -j ACCEPT +{% endif %} + +{% if munin_server is defined and configure_munin is defined and configure_munin %} +{% for ip in munin_server %} +# Munin +-A INPUT -m state --state NEW -s {{ ip }} -p tcp -m tcp --dport 4949 -j ACCEPT +{% endfor %} +{% endif %} + +{% if ganglia_gmond_cluster_port is defined %} +# Ganglia +{% if ganglia_gmond_use_jmxtrans is not defined or not ganglia_gmond_use_jmxtrans %} +-A INPUT -m pkttype --pkt-type multicast -d {{ ganglia_gmond_mcast_addr }} -j ACCEPT +{% else %} +-A INPUT -m pkttype --pkt-type multicast -j ACCEPT +-A INPUT -p udp -m udp -d {{ ganglia_gmond_mcast_addr }} --dport {{ ganglia_gmond_cluster_port }} -j ACCEPT +{% endif %} +-A INPUT -m state --state NEW -s {{ ganglia_gmetad_host }} -p tcp -m tcp --dport {{ ganglia_gmond_cluster_port }} -j ACCEPT +-A INPUT -s {{ 
ganglia_gmetad_host }} -p udp -m udp --dport {{ ganglia_gmond_cluster_port }} -j ACCEPT +{% endif %} + +{% if postfix_relay_server is defined and postfix_relay_server %} +# +# These are only needed on the machines that act as relay servers +# +-A INPUT -p tcp -m multiport --dports 25,587,465 -s {{ network.nmis }} -j ACCEPT +-A OUTPUT -p tcp -m multiport --dports 25,587,465 -s 127.0.0.1 -d 127.0.0.1 -j ACCEPT +{% if postfix_use_relay_host is defined and postfix_use_relay_host %} +-A OUTPUT -p tcp -m multiport --dports 25,587,465 -m owner --gid-owner postfix -d {{ postfix_relay_host }} -j ACCEPT +{% else %} +-A OUTPUT -p tcp -m multiport --dports 25,587,465 -m owner --gid-owner postfix -j ACCEPT +{% endif %} +-A OUTPUT -p tcp -m multiport --dports 25,587,465 -m state --state NEW -j LOG --log-prefix "LOCAL_DROPPED_SPAM " --log-uid +-A OUTPUT -p tcp -m multiport --dports 25,587,465 -j DROP +{% endif %} +{% if postfix_relay_server is defined and not postfix_relay_server %} +# +# When we are not a relay server but we want send email using our relay +-A OUTPUT -p tcp -m multiport --dports 25,587,465 -s 127.0.0.1 -d 127.0.0.1 -j ACCEPT +-A OUTPUT -p tcp -m multiport --dports 25,587,465 -m owner --gid-owner postfix -d {{ postfix_relay_host }} -j ACCEPT +-A OUTPUT -p tcp -m multiport --dports 25,587,465 -m state --state NEW -j LOG --log-prefix "LOCAL_DROPPED_SPAM " --log-uid +-A OUTPUT -p tcp -m multiport --dports 25,587,465 -j DROP +{% endif %} + +{% if iptables is defined %} +{% if iptables.tcp_rules is defined and iptables.tcp_rules %} +{% for tcp_rule in iptables.tcp %} +{% if tcp_rule.allowed_hosts is defined %} +{% for ip in tcp_rule.allowed_hosts %} +-A INPUT -m state --state NEW -s {{ ip }} -p tcp -m tcp --dport {{ tcp_rule.port }} -j ACCEPT +{% endfor %} +{% else %} +-A INPUT -m state --state NEW -m tcp -p tcp --dport {{ tcp_rule.port }} -j ACCEPT +{% endif %} +{% endfor %} +{% endif %} + +{% if iptables.udp_rules is defined and iptables.udp_rules %} +{% for 
udp_rule in iptables.udp %} +{% if udp_rule.allowed_hosts is defined %} +{% for ip in udp_rule.allowed_hosts %} +-A INPUT -s {{ ip }} -p udp -m udp --dport {{ udp_rule.port }} -j ACCEPT +{% endfor %} +{% else %} +-A INPUT -p udp -m udp --dport {{ udp_rule.port }} -j ACCEPT +{% endif %} +{% endfor %} +{% endif %} +{% endif %} +# +# +-A INPUT -s 125.24.0.0/14 -j DROP +{% if iptables_default_policy == 'REJECT' %} +-A INPUT -j REJECT --reject-with icmp-host-prohibited +-A FORWARD -j REJECT --reject-with icmp-host-prohibited +{% else %} +-A INPUT -j {{ iptables_default_policy }} +-A FORWARD -j {{ iptables_default_policy }} +{% endif %} + +COMMIT diff --git a/iptables/templates/iptables-rules.v6.j2 b/iptables/templates/iptables-rules.v6.j2 new file mode 100644 index 00000000..d998b087 --- /dev/null +++ b/iptables/templates/iptables-rules.v6.j2 @@ -0,0 +1,5 @@ +*filter +:INPUT ACCEPT [0:0] +:FORWARD ACCEPT [0:0] +:OUTPUT ACCEPT [0:0] +COMMIT diff --git a/jetty-apache/defaults/main.yml b/jetty-apache/defaults/main.yml new file mode 100644 index 00000000..dd70eee6 --- /dev/null +++ b/jetty-apache/defaults/main.yml @@ -0,0 +1,22 @@ +--- +jetty: + activate_at_boot: True + listen_ip: 127.0.0.1 + user: jetty + group: jetty + verbose: "Yes" + http_port: 8080 + java_opts: "-Xmx2048m -XX:MaxPermSize=512m -Djava.awt.headless=true -Dorg.mortbay.util.URI.charset=utf-8" + enable_jmx: False + jmx_java_options: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.port=8286 -Dcom.sun.management.jmxremote.ssl=false" + cache_dir: /var/cache/jetty + tmp_dir: /var/cache/jetty/data + jvm_tmp_dir: /var/cache/jetty/tmp + shutdown_timeout: 30 + java_home: "" + logfile_days: 14 + define_lang: False + lang: "en_US.UTF-8" + open_files: 2048 + diff --git a/jetty-apache/handlers/main.yml b/jetty-apache/handlers/main.yml new file mode 100644 index 00000000..ea56b4e8 --- /dev/null +++ b/jetty-apache/handlers/main.yml @@ -0,0 +1,9 @@ +--- +- 
name: apache2 reload + service: name=apache2 state=reloaded + +- name: Start jetty + service: name=jetty state=started + +- name: Restart jetty + service: name=jetty state=restarted diff --git a/jetty-apache/tasks/apache.yml b/jetty-apache/tasks/apache.yml new file mode 100644 index 00000000..776959eb --- /dev/null +++ b/jetty-apache/tasks/apache.yml @@ -0,0 +1,29 @@ +--- +- name: Install the apache packages + apt: pkg={{ item }} state=installed force=yes + with_items: + - apache2 + - apache2-mpm-prefork + - apache2-utils + - unzip + - zip + tags: + - apache + +- name: Load the required modules + file: src=/etc/apache2/mods-available/{{ item }} dest=/etc/apache2/mods-enabled/{{ item }} state=link + with_items: + - proxy.load + - proxy_http.load + - headers.load + - rewrite.load + - expires.load + notify: apache2 reload + tags: + - apache + +- name: Remove the default apache virtualhost + file: dest=/etc/apache2/sites-enabled/000-default state=absent + notify: apache2 reload + tags: + - apache diff --git a/jetty-apache/tasks/jetty.yml b/jetty-apache/tasks/jetty.yml new file mode 100644 index 00000000..ec14fd90 --- /dev/null +++ b/jetty-apache/tasks/jetty.yml @@ -0,0 +1,53 @@ +--- +- name: Install the jetty packages + apt: pkg={{ item }} state=installed force=yes + with_items: + - jetty + - libapache2-mod-jk + notify: + apache2 reload + tags: + - jetty + +- name: Ensure that the jetty cache directory exists + file: dest={{ item }} owner={{ jetty.user }} group={{ jetty.group }} mode=0750 state=directory + with_items: + - '{{ jetty.cache_dir }}' + - '{{ jetty.tmp_dir }}' + - '{{ jetty.jvm_tmp_dir }}' + tags: + - jetty + +- name: Install the jetty defaults + template: src=jetty-defaults.j2 dest=/etc/default/jetty + notify: + Restart jetty + tags: + - jetty + +# - name: Set the jetty limits +# template: src={{ item }}.j2 dest=/etc/jetty/{{ item }} +# with_items: +# - jetty-setuid.xml +# notify: +# Restart jetty +# tags: +# - jetty + +# - name: Load jetty-setuid.xml in 
/etc/jetty/jetty.conf +# lineinfile: name=/etc/jetty/jetty.conf line={{ item }} +# with_items: +# - '/etc/jetty/jetty-setuid.xml' +# notify: +# Restart jetty +# tags: +# - jetty + +- name: Set the jetty limits + template: src={{ item }}.j2 dest=/etc/security/limits.d/{{ item }} + with_items: + - jetty-limits.conf + notify: + Restart jetty + tags: + - jetty diff --git a/jetty-apache/tasks/main.yml b/jetty-apache/tasks/main.yml new file mode 100644 index 00000000..c2545b29 --- /dev/null +++ b/jetty-apache/tasks/main.yml @@ -0,0 +1,3 @@ +--- +- include: apache.yml +- include: jetty.yml diff --git a/jetty-apache/templates/jetty-defaults.j2 b/jetty-apache/templates/jetty-defaults.j2 new file mode 100644 index 00000000..6bc48d92 --- /dev/null +++ b/jetty-apache/templates/jetty-defaults.j2 @@ -0,0 +1,51 @@ +# change to 0 to allow Jetty to start +{% if jetty.activate_at_boot %} +NO_START=0 +{% else %} +NO_START=YES +{% endif %} + +# change to 'no' or uncomment to use the default setting in /etc/default/rcS +VERBOSE={{ jetty.verbose }} + +# Run Jetty as this user ID (default: jetty) +# Set this to an empty string to prevent Jetty from starting automatically +JETTY_USER={{ jetty.user }} + +# Listen to connections from this network host +# Use 0.0.0.0 as host to accept all connections. +# Uncomment to restrict access to localhost +JETTY_HOST={{ jetty.listen_ip }} + +# The network port used by Jetty +JETTY_PORT={{ jetty.http_port }} + +# Timeout in seconds for the shutdown of all webapps +JETTY_SHUTDOWN={{ jetty.shutdown_timeout }} + +# Additional arguments to pass to Jetty +#JETTY_ARGS= + +# Extra options to pass to the JVM +JAVA_OPTIONS="{{ jetty.java_opts }}" +{% if jetty.enable_jmx %} +JAVA_OPTIONS="$JAVA_OPTIONS {{ jetty.jmx_java_options }}" +{% endif %} + +# Home of Java installation. 
+JAVA_HOME={{ jetty.java_home }} + +# Jetty uses a directory to store temporary files like unpacked webapps +JETTY_TMP={{ jetty.tmp_dir }} +JVM_TMP={{ jetty.jvm_tmp_dir }} + +# Jetty uses a config file to setup its boot classpath +#JETTY_START_CONFIG=/etc/jetty/start.config + +# Default for number of days to keep old log files in /var/log/jetty/ +LOGFILE_DAYS={{ jetty.logfile_days }} + +{% if jetty.define_lang %} +export LANG={{ jetty.lang }} +{% endif %} + diff --git a/jetty-apache/templates/jetty-limits.conf.j2 b/jetty-apache/templates/jetty-limits.conf.j2 new file mode 100644 index 00000000..80fc4630 --- /dev/null +++ b/jetty-apache/templates/jetty-limits.conf.j2 @@ -0,0 +1,4 @@ +{{ jetty.user }} soft nofile {{ jetty.open_files }} +{{ jetty.user }} hard nofile {{ jetty.open_files }} +root soft nofile {{ jetty.open_files }} +root hard nofile {{ jetty.open_files }} diff --git a/jetty-apache/templates/jetty-setuid.xml.j2 b/jetty-apache/templates/jetty-setuid.xml.j2 new file mode 100644 index 00000000..cf0bc326 --- /dev/null +++ b/jetty-apache/templates/jetty-setuid.xml.j2 @@ -0,0 +1,22 @@ + + + + + + + + + + false + 2 + {{ jetty.user }} + {{ jetty.group }} + + + + {{ jetty.open_files }} + {{ jetty.open_files }} + + + + diff --git a/jetty/defaults/main.yml b/jetty/defaults/main.yml new file mode 100644 index 00000000..76902b8c --- /dev/null +++ b/jetty/defaults/main.yml @@ -0,0 +1,25 @@ +--- +jetty_activate_at_boot: True +jetty_listen_ip: 127.0.0.1 +jetty_user: jetty +jetty_group: jetty +jetty_verbose: "Yes" +jetty_http_port: 8080 +jetty_ajp_port: 8009 +jetty_java_opts: "-Xmx2048m -XX:MaxPermSize=512m -Djava.awt.headless=true -Dorg.mortbay.util.URI.charset=utf-8" +jetty_enable_jmx: False +jetty_jmx_port: 8286 +jetty_jmx_authenticate: "false" +jetty_jmx_ssl: "false" +jetty_jmx_java_options: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=${jetty_jmx_authenticate} -Dcom.sun.management.jmxremote.port=${jetty_jmx_port} 
-Dcom.sun.management.jmxremote.ssl=${jetty_jmx_ssl}" +jetty_cache_dir: /var/cache/jetty +jetty_tmp_dir: /var/cache/jetty/data +jetty_jvm_tmp_dir: /var/cache/jetty/tmp +jetty_shutdown_timeout: 30 +jetty_java_home: "" +jetty_logfile_days: 14 +jetty_define_lang: False +jetty_lang: "en_US.UTF-8" +jetty_open_files: 2048 +jetty_use_apache: False + diff --git a/jetty/handlers/main.yml b/jetty/handlers/main.yml new file mode 100644 index 00000000..ea56b4e8 --- /dev/null +++ b/jetty/handlers/main.yml @@ -0,0 +1,9 @@ +--- +- name: apache2 reload + service: name=apache2 state=reloaded + +- name: Start jetty + service: name=jetty state=started + +- name: Restart jetty + service: name=jetty state=restarted diff --git a/jetty/tasks/jetty.yml b/jetty/tasks/jetty.yml new file mode 100644 index 00000000..bbb248dd --- /dev/null +++ b/jetty/tasks/jetty.yml @@ -0,0 +1,48 @@ +--- +- name: Install the jetty packages + apt: pkg={{ item }} state=installed force=yes + with_items: + - jetty + tags: + - jetty + +- name: Fix the broken jetty startup script + shell: perl -pi -e "s/\^\[:space:]\*/^[[:space:]]*/g" /etc/init.d/jetty + ignore_errors: True + tags: + - jetty + +- name: Install the apache mod_jk module, if needed + apt: pkg={{ item }} state=installed force=yes + with_items: + - libapache2-mod-jk + when: jetty_use_apache is defined and jetty_use_apache + notify: + apache2 reload + tags: + - jetty + +- name: Ensure that the jetty cache directory exists + file: dest={{ item }} owner={{ jetty_user }} group={{ jetty_group }} mode=0750 state=directory + with_items: + - '{{ jetty_cache_dir }}' + - '{{ jetty_tmp_dir }}' + - '{{ jetty_jvm_tmp_dir }}' + tags: + - jetty + +- name: Install the jetty defaults + template: src=jetty-defaults.j2 dest=/etc/default/jetty + notify: + Restart jetty + tags: + - jetty + +- name: Set the jetty limits + template: src={{ item }}.j2 dest=/etc/security/limits.d/{{ item }} + with_items: + - jetty-limits.conf + notify: + Restart jetty + tags: + - jetty diff 
--git a/jetty/tasks/main.yml b/jetty/tasks/main.yml new file mode 100644 index 00000000..bf995797 --- /dev/null +++ b/jetty/tasks/main.yml @@ -0,0 +1,2 @@ +--- +- include: jetty.yml diff --git a/jetty/templates/jetty-defaults.j2 b/jetty/templates/jetty-defaults.j2 new file mode 100644 index 00000000..6eb2b44a --- /dev/null +++ b/jetty/templates/jetty-defaults.j2 @@ -0,0 +1,51 @@ +# change to 0 to allow Jetty to start +{% if jetty_activate_at_boot %} +NO_START=0 +{% else %} +NO_START=YES +{% endif %} + +# change to 'no' or uncomment to use the default setting in /etc/default/rcS +VERBOSE={{ jetty_verbose }} + +# Run Jetty as this user ID (default: jetty) +# Set this to an empty string to prevent Jetty from starting automatically +JETTY_USER={{ jetty_user }} + +# Listen to connections from this network host +# Use 0.0.0.0 as host to accept all connections. +# Uncomment to restrict access to localhost +JETTY_HOST={{ jetty_listen_ip }} + +# The network port used by Jetty +JETTY_PORT={{ jetty_http_port }} + +# Timeout in seconds for the shutdown of all webapps +JETTY_SHUTDOWN={{ jetty_shutdown_timeout }} + +# Additional arguments to pass to Jetty +#JETTY_ARGS= + +# Extra options to pass to the JVM +JAVA_OPTIONS="{{ jetty_java_opts }}" +{% if jetty_enable_jmx %} +JAVA_OPTIONS="$JAVA_OPTIONS {{ jetty_jmx_java_options }}" +{% endif %} + +# Home of Java installation. 
+JAVA_HOME={{ jetty_java_home }} + +# Jetty uses a directory to store temporary files like unpacked webapps +JETTY_TMP={{ jetty_tmp_dir }} +JVM_TMP={{ jetty_jvm_tmp_dir }} + +# Jetty uses a config file to setup its boot classpath +#JETTY_START_CONFIG=/etc/jetty/start.config + +# Default for number of days to keep old log files in /var/log/jetty/ +LOGFILE_DAYS={{ jetty_logfile_days }} + +{% if jetty_define_lang %} +export LANG={{ jetty_lang }} +{% endif %} + diff --git a/jetty/templates/jetty-limits.conf.j2 b/jetty/templates/jetty-limits.conf.j2 new file mode 100644 index 00000000..d9b1cd65 --- /dev/null +++ b/jetty/templates/jetty-limits.conf.j2 @@ -0,0 +1,4 @@ +{{ jetty_user }} soft nofile {{ jetty_open_files }} +{{ jetty_user }} hard nofile {{ jetty_open_files }} +root soft nofile {{ jetty_open_files }} +root hard nofile {{ jetty_open_files }} diff --git a/jetty/templates/jetty-setuid.xml.j2 b/jetty/templates/jetty-setuid.xml.j2 new file mode 100644 index 00000000..cfc4c1d8 --- /dev/null +++ b/jetty/templates/jetty-setuid.xml.j2 @@ -0,0 +1,22 @@ + + + + + + + + + + false + 2 + {{ jetty_user }} + {{ jetty_group }} + + + + {{ jetty_open_files }} + {{ jetty_open_files }} + + + + diff --git a/ldap-client-config/defaults/main.yml b/ldap-client-config/defaults/main.yml new file mode 100644 index 00000000..048eaa20 --- /dev/null +++ b/ldap-client-config/defaults/main.yml @@ -0,0 +1,4 @@ +--- +nemis_ldap_uri: "ldap://ldap.sub.research-infrastructures.eu" +nemis_ldap_base_dn: "dc=research-infrastructures,dc=eu" + diff --git a/ldap-client-config/tasks/main.yml b/ldap-client-config/tasks/main.yml new file mode 100644 index 00000000..7f5d78bf --- /dev/null +++ b/ldap-client-config/tasks/main.yml @@ -0,0 +1,39 @@ +--- +- name: Install the ldap utilities + apt: pkg={{ item }} state={{ pkg_state }} + with_items: + - ldapscripts + - libpam-ldap + tags: + - ldap-client + +- name: Write the ldap client configuration file + template: src=ldap.conf.j2 dest=/etc/ldap.conf mode=444 
owner=root group=root + when: is_ubuntu_less_than_trusty + tags: + - ldap-client + +- name: Write the ldap client configuration file + template: src=ldap.conf.j2 dest=/etc/ldap/ldap.conf mode=444 owner=root group=root + when: is_trusty + tags: + - ldap-client + +- name: set the ldapscripts.conf uri + action: configfile path=/etc/ldapscripts/ldapscripts.conf key=SERVER value='{{ nemis_ldap_uri }}' syntax=shell + when: is_trusty + tags: + - ldap-client + +- name: set the ldapscripts.conf bind dn + action: configfile path=/etc/ldapscripts/ldapscripts.conf key=BINDDN value='cn=admin,{{ nemis_ldap_base_dn }}' syntax=shell + when: is_trusty + tags: + - ldap-client + +- name: set the ldapscripts.conf dn suffix + action: configfile path=/etc/ldapscripts/ldapscripts.conf key=SUFFIX value='{{ nemis_ldap_base_dn }}' syntax=shell + when: is_trusty + tags: + - ldap-client + diff --git a/ldap-client-config/templates/ldap.conf.j2 b/ldap-client-config/templates/ldap.conf.j2 new file mode 100644 index 00000000..7a81eae4 --- /dev/null +++ b/ldap-client-config/templates/ldap.conf.j2 @@ -0,0 +1,11 @@ +# The distinguished name of the search base. 
+BASE {{ nemis_ldap_base_dn }} + +# Another way to specify your LDAP server is to provide an +URI {{ nemis_ldap_uri }} + +# The LDAP version to use (defaults to 3 +# if supported by client library) +ldap_version 3 + +nss_initgroups_ignoreusers avahi,backup,bin,daemon,games,gnats,irc,libuuid,list,lp,mail,man,messagebus,munin,news,nslcd,proxy,root,rstudio-server,sshd,sync,sys,syslog,uucp,www-data diff --git a/logstash-rsyslog/defaults/main.yml b/logstash-rsyslog/defaults/main.yml new file mode 100644 index 00000000..4cf1fe8e --- /dev/null +++ b/logstash-rsyslog/defaults/main.yml @@ -0,0 +1,41 @@ +--- +# +rsyslog_install_newer_package: True +rsyslog_ppa: "ppa:adiscon/v8-stable" +rsyslog_debian_repo: "deb http://debian.adiscon.com/v8-stable wheezy/" +rsyslog_repo_key: "AEF0CF8E" +rsyslog_pkg_status: "latest" + +rsyslog_use_inotify: True +# Not used when inotify is enabled +rsyslog_file_polling_interval: 10 + +# We use logstash if the elastisearch module is not enabled +#rsys_logstash_collector_host: logstash.t.hadoop.research-infrastructures.eu +rsys_logstash_collector_host: logstash +rsys_logstash_collector_port: 5544 + +# IMPORTANT: the log_state_file names must be unique +#rsys_logfiles: +# - { logfile: '/var/log/tomcat7/catalina.log', log_tag: 'solr-state', log_state_file: 'solr-state'} +# - { logfile: '/var/log/tomcat7/localhost_access.log', log_tag: 'solr-access', log_state_file: 'solr-access'} + +# +# IMPORTANT NOTE: the following setting only work if rsyslog_install_newer_package is set to True +# +rsyslog_use_queues: True +rsyslog_main_queue_size: 1000000 +rsyslog_main_queue_debatchsize: 256 +rsyslog_main_queue_workerthreads: 2 +rsyslog_action_queue_debatchsize: 1024 +rsyslog_action_queue_size: 100000 +rsyslog_action_queue_workerthreads: 5 +# -1 means retry indefinitely if ES is unreachable +rsyslog_action_resumeretrycount: -1 + +# The elasticsearch module bypasses logstash and talks directly to elasticsearch +rsyslog_use_elasticsearch_module: True 
+#rsys_elasticsearch_collector_host: logstash.t.hadoop.research-infrastructures.eu +rsys_elasticsearch_collector_host: logstash +rsys_elasticsearch_collector_port: 9200 + diff --git a/logstash-rsyslog/handlers/main.yml b/logstash-rsyslog/handlers/main.yml new file mode 100644 index 00000000..ab5be766 --- /dev/null +++ b/logstash-rsyslog/handlers/main.yml @@ -0,0 +1,6 @@ +--- +- name: Restart rsyslog + #service: name=rsyslog state=restarted + command: /usr/sbin/service rsyslog stop ; /usr/sbin/service rsyslog start + + diff --git a/logstash-rsyslog/tasks/main.yml b/logstash-rsyslog/tasks/main.yml new file mode 100644 index 00000000..5cc018ac --- /dev/null +++ b/logstash-rsyslog/tasks/main.yml @@ -0,0 +1,70 @@ +--- +- name: Install the rsyslog ppa on ubuntu precise or later + apt_repository: repo='{{ rsyslog_ppa }}' + when: + - is_ubuntu + - rsyslog_install_newer_package + register: rsyslog_ubuntu_repo + tags: + - rsyslog + - logstash + +- name: Install the rsyslog repo key on debian wheezy + apt_key: keyserver=keys.gnupg.net id=AEF0CF8E state=present + when: + - is_debian7 + - rsyslog_install_newer_package + tags: + - rsyslog + - logstash + +- name: Install the rsyslog repository on debian wheezy + copy: content="{{ rsyslog_debian_repo }}\n" dest=/etc/apt/sources.list.d/adiscon-rsyslog.list + register: rsyslog_debian_repo + when: + - is_debian7 + - rsyslog_install_newer_package + tags: + - rsyslog + - logstash + +- name: Update apt cache + apt: update_cache=yes + when: (rsyslog_ubuntu_repo|changed) or (rsyslog_debian_repo|changed) + tags: + - rsyslog + - logstash + +- name: Add the syslog user to the adm group so it can read all the log files + user: name=syslog groups=adm + tags: + - rsyslog + - logstash + +- name: Upgrade rsyslog and install the elasticsearch module + apt: pkg={{ item }} state={{ rsyslog_pkg_status }} + with_items: + - rsyslog + - rsyslog-elasticsearch + tags: + - rsyslog + - logstash + +- name: Add a rsyslog configuration to send logfiles data to 
a logstash collector or directly to elasticsearch + template: src=rsyslog-logstash.conf.j2 dest=/etc/rsyslog.d/90-rsyslog-logstash.conf owner=root group=root mode=0444 + when: rsyslog_install_newer_package + notify: + Restart rsyslog + tags: + - logstash + - rsyslog + +- name: Add a rsyslog configuration to send logfiles data to a logstash collector when using the original and old rsyslog package + template: src=old-rsyslog-logstash.conf.j2 dest=/etc/rsyslog.d/90-rsyslog-logstash.conf owner=root group=root mode=0444 + when: not rsyslog_install_newer_package + notify: + Restart rsyslog + tags: + - logstash + - rsyslog + diff --git a/logstash-rsyslog/templates/old-rsyslog-logstash.conf.j2 b/logstash-rsyslog/templates/old-rsyslog-logstash.conf.j2 new file mode 100644 index 00000000..6b04e064 --- /dev/null +++ b/logstash-rsyslog/templates/old-rsyslog-logstash.conf.j2 @@ -0,0 +1,13 @@ +$ModLoad imfile + +{% for log in rsys_logfiles %} +$InputFileName {{ log.logfile }} +$InputFileTag {{ log.log_tag }} +$InputFileStateFile {{ log.log_state_file }} +$InputRunFileMonitor + +{% endfor %} + +# Send all to the logstash server +*.* @@{{ rsys_logstash_collector_host }}:{{ rsys_logstash_collector_port }} + diff --git a/logstash-rsyslog/templates/rsyslog-logstash.conf.j2 b/logstash-rsyslog/templates/rsyslog-logstash.conf.j2 new file mode 100644 index 00000000..829ef0f7 --- /dev/null +++ b/logstash-rsyslog/templates/rsyslog-logstash.conf.j2 @@ -0,0 +1,70 @@ +{% if rsys_logfiles is defined %} +{% if rsyslog_use_inotify %} +module(load="imfile" mode="inotify" ) +{% else %} +module(load="imfile" mode="polling" PollingInterval="10" ) +{% endif %} +{% for log in rsys_logfiles %} +input( +Type="imfile" +File="{{ log.logfile }}" +Tag="{{ log.log_tag }}" +) + +{% endfor %} +{% endif %} +{% if rsyslog_use_elasticsearch_module %} +module(load="omelasticsearch") + +{% if rsyslog_use_queues %} +main_queue( + queue.size="{{ rsyslog_main_queue_size }}" # capacity of the main queue + 
queue.debatchsize="{{ rsyslog_main_queue_debatchsize }}" # process messages in batches of 1000 and move them to the action queues + queue.workerthreads="{{ rsyslog_main_queue_workerthreads }}" # threads for the main queue +) +{% endif %} + +template(name="logstash-index" + type="list") { + constant(value="logstash-") + property(name="timereported" dateFormat="rfc3339" position.from="1" position.to="4") + constant(value=".") + property(name="timereported" dateFormat="rfc3339" position.from="6" position.to="7") + constant(value=".") + property(name="timereported" dateFormat="rfc3339" position.from="9" position.to="10") +} + +# this is for formatting our syslog in JSON with @timestamp +template(name="plain-syslog" + type="list") { + constant(value="{") + constant(value="\"@timestamp\":\"") property(name="timereported" dateFormat="rfc3339") + constant(value="\"received_at\":\"") property(name="timereported" dateFormat="rfc3339") + constant(value="\",\"host\":\"") property(name="hostname") + constant(value="\",\"received_from\":\"") property(name="hostname") + constant(value="\",\"severity\":\"") property(name="syslogseverity-text") + constant(value="\",\"facility\":\"") property(name="syslogfacility-text") + constant(value="\",\"tag\":\"") property(name="syslogtag" format="json") + constant(value="\",\"message\":\"") property(name="msg" format="json") + constant(value="\"}") +} +# this is where we actually send the logs to Elasticsearch ({{ rsys_elasticsearch_collector_host }}:{{ rsys_elasticsearch_collector_port }}) +*.* action(type="omelasticsearch" + template="plain-syslog" + searchIndex="logstash-index" + dynSearchIndex="on" +{% if rsyslog_use_queues %} + bulkmode="on" + queue.dequeuebatchsize="{{ rsyslog_action_queue_debatchsize }}" # ES bulk size + queue.size="{{ rsyslog_action_queue_size }}" # capacity of the action queue + queue.workerthreads="{{ rsyslog_action_queue_workerthreads }}" # workers for the action + action.resumeretrycount="{{ 
rsyslog_action_resumeretrycount }}" +{% endif %} + server="{{ rsys_elasticsearch_collector_host }}" + serverport="{{ rsys_elasticsearch_collector_port }}" + ) +{% else %} +# Send all to the logstash server +*.* @@{{ rsys_logstash_collector_host }}:{{ rsys_logstash_collector_port }} +{% endif %} + diff --git a/mongodb/defaults/main.yml b/mongodb/defaults/main.yml new file mode 100644 index 00000000..f80e739c --- /dev/null +++ b/mongodb/defaults/main.yml @@ -0,0 +1,22 @@ +--- +mongodb: + start_server: 'yes' + tcp_port: 27017 + allowed_hosts: + - '{{ ansible_fqdn }}/32' + - 127.0.0.1/8 + +mongodb_install_from_external_repo: True +mongodb_start_server: 'yes' +mongodb_tcp_port: 27017 +mongodb_http_interface: False +mongodb_http_port: 28017 +mongodb_user: mongodb +mongodb_group: mongodb +mongodb_logdir: /var/log/mongodb +mongodb_logpath: '{{ mongodb_logdir }}/mongodb.log' +mongodb_dbpath: /var/lib/mongodb +mongodb_directoryperdb: False +mongodb_allowed_hosts: + - '{{ ansible_fqdn }}/32' + - 127.0.0.1/8 diff --git a/mongodb/handlers/main.yml b/mongodb/handlers/main.yml new file mode 100644 index 00000000..b90f828c --- /dev/null +++ b/mongodb/handlers/main.yml @@ -0,0 +1,7 @@ +--- +- name: Update apt cache + apt: update_cache=yes + ignore_errors: true + +- name: Restart mongodb + service: name=mongodb state=restarted diff --git a/mongodb/tasks/main.yml b/mongodb/tasks/main.yml new file mode 100644 index 00000000..fd7c34eb --- /dev/null +++ b/mongodb/tasks/main.yml @@ -0,0 +1,61 @@ +--- +- name: Install the mongodb apt key + #apt_key: id=7F0CEB10 state=present + raw: apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 7F0CEB10 + when: mongodb_install_from_external_repo + tags: mongodb + +- name: Install the mongodb repository + copy: content="deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen" dest=/etc/apt/sources.list.d/mongodb.list owner=root group=root mode=044 + when: mongodb_install_from_external_repo + register: external_repo + tags: 
mongodb + +- name: Update the apt cache + apt: update_cache=yes + when: ( external_repo | changed ) + ignore_errors: True + tags: mongodb + +- name: Install the mongodb server + apt: pkg={{ item }} state=installed + with_items: + - mongodb-10gen + when: mongodb_install_from_external_repo + tags: mongodb + +- name: Install the mongodb server + apt: pkg={{ item }} state=installed + with_items: + - mongodb-server + when: not mongodb_install_from_external_repo + tags: mongodb + +- name: Install the mongodb defaults file + copy: content="ENABLE_MONGODB={{ mongodb_start_server }}" dest=/etc/default/mongodb owner=root group=root mode=0444 + tags: mongodb + +- name: Create the mongodb db directory + file: dest={{ mongodb_dbpath }} state=directory owner={{ mongodb_user }} group={{ mongodb_group }} mode=0755 + tags: mongodb + +- name: Create the mongodb log directory + file: dest={{ mongodb_logdir }} state=directory owner={{ mongodb_user }} group={{ mongodb_group }} mode=0755 + tags: mongodb + +- name: Install the mongodb 2.4 configuration + template: src=mongodb-2.4.conf.j2 dest=/etc/mongodb.conf owner=root group=root mode=0444 + when: ( mongodb_start_server is defined ) and ( mongodb_start_server == 'yes' ) + notify: Restart mongodb + tags: mongodb + +- name: Ensure mongodb is started + service: name=mongodb state=started enabled=yes + when: ( mongodb_start_server is defined ) and ( mongodb_start_server == 'yes' ) + tags: mongodb + +- name: Ensure mongodb is stopped and disabled + service: name=mongodb state=stopped enabled=no + when: ( mongodb_start_server is defined ) and ( mongodb_start_server == 'no' ) + tags: mongodb + diff --git a/mongodb/templates/mongodb-2.4.conf.j2 b/mongodb/templates/mongodb-2.4.conf.j2 new file mode 100644 index 00000000..87745e2e --- /dev/null +++ b/mongodb/templates/mongodb-2.4.conf.j2 @@ -0,0 +1,84 @@ +# Note: if you run mongodb as a non-root user (recommended) you may +# need to create and set permissions for this directory manually, +# 
e.g., if the parent directory isn't mutable by the mongodb user. +dbpath={{ mongodb_dbpath }} +directoryperdb={{ mongodb_directoryperdb }} + +#where to log +logpath={{ mongodb_logpath }} + +logappend=true + +port = {{ mongodb_tcp_port }} + +# Disables write-ahead journaling +# nojournal = true + +# Enables periodic logging of CPU utilization and I/O wait +#cpu = true + +# Turn on/off security. Off is currently the default +#noauth = true +#auth = true + +# Verbose logging output. +#verbose = true + +# Inspect all client data for validity on receipt (useful for +# developing drivers) +#objcheck = true + +# Enable db quota management +#quota = true + +# Set oplogging level where n is +# 0=off (default) +# 1=W +# 2=R +# 3=both +# 7=W+some reads +#diaglog = 0 +# Ignore query hints +#nohints = true + +{% if not mongodb_http_interface %} +# Disable the HTTP interface (Defaults to localhost:28017). +nohttpinterface = true +{% endif %} + +# Turns off server-side scripting. This will result in greatly limited +# functionality +#noscripting = true + +# Turns off table scans. Any query that would do a table scan fails. +#notablescan = true + +# Disable data file preallocation. +#noprealloc = true + +# Specify .ns file size for new databases. +# nssize = + +# Accout token for Mongo monitoring server. +#mms-token = + +# Server name for Mongo monitoring server. +#mms-name = + +# Ping interval for Mongo monitoring server. 
+#mms-interval = + +# Replication Options + +# in master/slave replicated mongo databases, specify here whether +# this is a slave or master +#slave = true +#source = master.example.com +# Slave only: specify a single database to replicate +#only = master.example.com +# or +#master = true +#source = slave.example.com + +# in replica set configuration, specify the name of the replica set +# replSet = setname diff --git a/nagios-nrpe-tomcat/defaults/main.yml b/nagios-nrpe-tomcat/defaults/main.yml new file mode 100644 index 00000000..d573d352 --- /dev/null +++ b/nagios-nrpe-tomcat/defaults/main.yml @@ -0,0 +1,8 @@ +--- + +check_tomcat_deps: + - libwww-perl + - liblwp-mediatypes-perl + - liblwp-useragent-determined-perl + - liblwp-protocol-https-perl + - libxml-xpath-perl diff --git a/nagios-nrpe-tomcat/files/check_tomcat b/nagios-nrpe-tomcat/files/check_tomcat new file mode 100644 index 00000000..d67cbec4 --- /dev/null +++ b/nagios-nrpe-tomcat/files/check_tomcat @@ -0,0 +1,387 @@ +#!/usr/bin/perl + +############################################################################# +# # +# This script was initially developed by Lonely Planet for internal use # +# and has kindly been made available to the Open Source community for # +# redistribution and further development under the terms of the # +# GNU General Public License v3: http://www.gnu.org/licenses/gpl.html # +# # +############################################################################# +# # +# This script is supplied 'as-is', in the hope that it will be useful, but # +# neither Lonely Planet nor the authors make any warranties or guarantees # +# as to its correct operation, including its intended function. # +# # +# Or in other words: # +# Test it yourself, and make sure it works for YOU. 
# +# # +############################################################################# +# Author: George Hansper e-mail: george@hansper.id.au # +############################################################################# + +use strict; +use LWP; +use LWP::UserAgent; +use Getopt::Std; +use XML::XPath; + +my %optarg; +my $getopt_result; + +my $lwp_user_agent; +my $http_request; +my $http_response; +my $url; +my $body; + +my @message; +my @message_perf; +my $exit = 0; +my @exit = qw/OK: WARNING: CRITICAL:/; + +my $rcs_id = '$Id: check_tomcat.pl,v 1.4 2013/03/15 10:45:41 george Exp $'; +my $rcslog = ' + $Log: check_tomcat.pl,v $ + Revision 1.4 2013/03/15 10:45:41 george + Fixed bug in % threads thresholds, which appear if multiple connectors are in use (thanks to Andreas Lamprecht for reporting this). + Changed MB to MiB in output text. + + Revision 1.3 2011/12/11 04:56:27 george + Added currentThreadCount to performance data. + + Revision 1.2 2011/11/18 11:30:57 george + Added capability to extract the connector names, and check any or all tomcat connectors for sufficient free threads. + Stripped quotes from connector names to work around tomcat7 quirkiness. + + Revision 1.1 2011/04/16 12:05:26 george + Initial revision + + '; + +# Defaults... 
+my $timeout = 10; # Default timeout +my $host = 'localhost'; # default host header +my $host_ip = 'localhost'; # default IP +my $port = 80; # default port +my $user = 'nagios'; # default user +my $password = 'nagios'; # default password +my $uri = '/manager/status?XML=true'; #default URI +my $http = 'http'; +my $connector_arg = undef; +my $opt_warn_threads = "25%"; +my $opt_crit_threads = "10%"; +my $warn_threads; +my $crit_threads; +# Memory thresholds are tight, because garbage collection kicks in only when memory is low anyway +my $opt_warn_memory = "5%"; +my $opt_crit_memory = "2%"; +my $warn_memory; +my $crit_memory; + +my $xpath; +my %xpath_checks = ( + maxThreads => '/status/connector/threadInfo/@maxThreads', + currentThreadCount => '/status/connector/threadInfo/@currentThreadCount', + currentThreadsBusy => '/status/connector/threadInfo/@currentThreadsBusy', + memMax => '/status/jvm/memory/@max', + memFree => '/status/jvm/memory/@free', + memTotal => '/status/jvm/memory/@total', +); +# XPath examples... +# /status/jvm/memory/@free +# /status/connector[attribute::name="http-8080"]/threadInfo/@maxThreads +# /status/connector/threadInfo/@* <- returns multiple nodes + +my %xpath_check_results; + +sub VERSION_MESSAGE() { + print "$^X\n$rcs_id\n"; +} + +sub HELP_MESSAGE() { + print <new; +$lwp_user_agent->timeout($timeout); +if ( $port == 80 || $port == 443 || $port eq "" ) { + $lwp_user_agent->default_header('Host' => $host); +} else { + $lwp_user_agent->default_header('Host' => "$host:$port"); +} + +$url = "$http://${host_ip}:${port}$uri"; +$http_request = HTTP::Request->new(GET => $url); + +printv "--------------- GET $url"; +printv $lwp_user_agent->default_headers->as_string . $http_request->headers_as_string; + +$http_response = $lwp_user_agent->request($http_request); +printv "---------------\n" . $http_response->protocol . " " . $http_response->status_line; +printv $http_response->headers_as_string; +printv "Content has " . 
length($http_response->content) . " bytes \n"; + +if ($http_response->is_success) { + $body = $http_response->content; + my $xpath = XML::XPath->new( xml => $body ); + my $xpath_check; + # Parse the data out of the XML... + foreach $xpath_check ( keys %xpath_checks ) { + #print keys(%{$xpath_check}) , "\n"; + my $path = $xpath_checks{$xpath_check}; + $path =~ s{\$port}{$port}; + #print $xpath_check->{xpath} , "\n"; + my $nodeset = $xpath->find($path); + if ( $nodeset->get_nodelist == 0 ) { + push @message, "$path not found"; + $exit |= 2; + push @message_perf, "$path=not_found"; + next; + } + foreach my $node ($nodeset->get_nodelist) { + my $connector_name = $node->getParentNode()->getParentNode()->getAttribute("name"); + $connector_name =~ s/^["'\s]+//; + $connector_name =~ s/["'\s]+$//; + my $value = $node->string_value(); + if ( $value =~ /^"?([0-9.]+)"?$/ ) { + $value = $1; + } else { + push @message, "$path is not numeric"; + $exit |= 2; + push @message_perf, "$path=not_numeric"; + next; + } + if ( $xpath_check =~ /^mem/ ) { + # This is the .../memory/.. xpath, just store the value in the hash + $xpath_check_results{$xpath_check} = $value; + } elsif ( $connector_name =~ /${connector_arg}/ && $connector_name ne "" ) { + # This is a .../threadInfo/... 
xpath, put the result into a hash (key is connector_name) + $xpath_check_results{$xpath_check}{$connector_name} = $value; + } + } + } + # Now apply the logic and check the results + #---------------------------------------------- + # Check memory + #---------------------------------------------- + my $jvm_mem_available = $xpath_check_results{memFree} + $xpath_check_results{memMax} - $xpath_check_results{memTotal}; + printv(sprintf("free=%d max=%d total=%d",$xpath_check_results{memFree}/1024, $xpath_check_results{memMax}/1024, $xpath_check_results{memTotal}/1024)); + if ( $opt_warn_memory =~ /(.*)%$/ ) { + $warn_memory = int($1 * $xpath_check_results{memMax} / 100); + } else { + # Convert to bytes + $warn_memory =int($opt_warn_memory * 1024 * 1024); + } + printv("warning at $warn_memory bytes (". ( $warn_memory / 1024 /1024 )."MiB) free, max=$xpath_check_results{memMax}"); + + if ( $opt_crit_memory =~ /(.*)%$/ ) { + $crit_memory = int($1 * $xpath_check_results{memMax} / 100); + } else { + # Convert to bytes + $crit_memory = int($opt_crit_memory * 1024 * 1024); + } + printv("critical at $crit_memory bytes (". ( $crit_memory / 1024 /1024 )."MiB) free, max=$xpath_check_results{memMax}"); + + if ( $jvm_mem_available <= $crit_memory ) { + $exit |= 2; + push @message, sprintf("Memory critical <%d MiB,",$crit_memory/1024/1024); + } elsif ( $jvm_mem_available <= $warn_memory ) { + $exit |= 1; + push @message, sprintf("Memory low <%d MiB,",$warn_memory/1024/1024); + } + push @message, sprintf("memory in use %d MiB (%d MiB);", + ( $xpath_check_results{memMax} - $jvm_mem_available ) / ( 1024 * 1024), + $xpath_check_results{memMax} / ( 1024 * 1024) + ); + push @message_perf, "used=".( $xpath_check_results{memMax} - $jvm_mem_available ) . 
" free=$jvm_mem_available max=$xpath_check_results{memMax}"; + + #---------------------------------------------- + # Check threads + #---------------------------------------------- + my $name; + foreach $name ( keys( %{$xpath_check_results{currentThreadsBusy}} ) ) { + + if ( $opt_warn_threads =~ /(.*)%$/ ) { + $warn_threads = int($1 * $xpath_check_results{maxThreads}{$name} / 100); + } else { + $warn_threads = $opt_warn_threads; + } + printv("warning at $warn_threads threads free, max=$xpath_check_results{maxThreads}{$name}"); + + if ( $opt_crit_threads =~ /(.*)%$/ ) { + $crit_threads = int($1 * $xpath_check_results{maxThreads}{$name} / 100); + } else { + $crit_threads = $opt_crit_threads; + } + printv("critical at $crit_threads threads free, max=$xpath_check_results{maxThreads}{$name}"); + + my $threads_available = $xpath_check_results{maxThreads}{$name} - $xpath_check_results{currentThreadsBusy}{$name}; + if ( $threads_available <= $crit_threads ) { + $exit |= 2; + push @message, sprintf("Critical: free_threads<%d",$crit_threads); + } elsif ( $threads_available <= $warn_threads ) { + $exit |= 1; + push @message, sprintf("Warning: free_threads<%d",$warn_threads); + } + push @message, sprintf("threads[$name]=%d(%d);", + $xpath_check_results{currentThreadsBusy}{$name}, + $xpath_check_results{maxThreads}{$name} + ); + if ( defined($optarg{n}) ) { + push @message_perf, "currentThreadsBusy[$name]=$xpath_check_results{currentThreadsBusy}{$name} currentThreadCount[$name]=$xpath_check_results{currentThreadCount}{$name} maxThreads[$name]=$xpath_check_results{maxThreads}{$name}"; + } else { + # For the sake of backwards-compatability of graphs etc... 
+ push @message_perf, "currentThreadsBusy=$xpath_check_results{currentThreadsBusy}{$name} currentThreadCount=$xpath_check_results{currentThreadCount}{$name} maxThreads=$xpath_check_results{maxThreads}{$name}"; + } + } + if ( keys(%{$xpath_check_results{currentThreadsBusy}}) == 0 ) { + # no matching connectors found - this is not OK. + $exit |= 1; + push @message, "Warning: No tomcat connectors matched name =~ /$connector_arg/"; + } +} elsif ( $http_response->code == 401 ) { + print "WARNING: $url " . $http_response->protocol . " " . $http_response->status_line ."\n"; + exit 1; +} else { + print "CRITICAL: $url " . $http_response->protocol . " " . $http_response->status_line ."\n"; + exit 2; +} + +if ( $exit == 3 ) { + $exit = 2; +} + +print "$exit[$exit] ". join(" ",@message) . "|". join(" ",@message_perf) . "\n"; +exit $exit; diff --git a/nagios-nrpe-tomcat/tasks/main.yml b/nagios-nrpe-tomcat/tasks/main.yml new file mode 100644 index 00000000..6b6ffb9b --- /dev/null +++ b/nagios-nrpe-tomcat/tasks/main.yml @@ -0,0 +1,31 @@ +--- +- name: Install the plugin dependencies + apt: pkg={{ item }} state=installed + with_items: check_tomcat_deps + tags: + - nagios + - nrpe + - check_tomcat + +- name: Install the check_tomcat plugin + copy: src=check_tomcat dest={{ nagios_isti_plugdir }}/check_tomcat owner=root group=root mode=0755 + tags: + - nagios + - nrpe + - check_tomcat + +- name: Install the check_tomcat nrpe commands file + template: src=check_tomcat-nrpe.cfg.j2 dest=/etc/nagios/nrpe.d/check_tomcat.cfg owner=root group=root mode=444 + notify: + - Reload NRPE server + tags: + - nrpe + - nagios + - check_tomcat + +- name: nagios needs root to execute some commands. 
We do it via sudo + template: src=nagios.sudoers.j2 dest=/etc/sudoers.d/nagios owner=root group=root mode=0440 + tags: + - nagios + - nrpe + diff --git a/nagios-server/defaults/main.yml b/nagios-server/defaults/main.yml new file mode 120000 index 00000000..03f8dc29 --- /dev/null +++ b/nagios-server/defaults/main.yml @@ -0,0 +1 @@ +../../nagios/defaults/main.yml \ No newline at end of file diff --git a/nagios/defaults/main.yml b/nagios/defaults/main.yml new file mode 100644 index 00000000..4a26c7b2 --- /dev/null +++ b/nagios/defaults/main.yml @@ -0,0 +1,55 @@ +--- +monitoring_group_name: 'change_this_on_your_playbook' +#nagios_server_local_plugdir: 'change_this_on_your_playbook' + +nagios_plugdir: /usr/lib/nagios/plugins +nagios_plugins_dir: '{{ nagios_plugdir }}' +nagios_centos_plugins_dir: /usr/lib64/nagios/plugins +nagios_isti_plugdir: '{{ nagios_plugdir }}/isti-cnr' +nagios_common_lib: check_library.sh +# Needed inside nrpe.cfg +# It is already defined in isti-global.yml +#nagios_monitoring_server_ip: 146.48.123.23 +nagios_check_disk_w: +nagios_check_disk_c: +nagios_hw: False +nagios_check_disk_w: 10 +nagios_check_disk_c: 5 +nagios_allowed_users: root + +nagios_monitoring_obj_dir: /etc/nagios3/objects +nagios_monitoring_dir: '{{ nagios_monitoring_obj_dir }}/{{ monitoring_group_name }}' + +nagios_server_files: + - contacts.cfg + - contactgroups.cfg + - generic-service.cfg + - generic-host.cfg + - hostgroups.cfg + - hosts.cfg + - services.cfg + - commands.cfg + +nagios_psql_query_time_w: 40 +nagios_psql_query_time_c: 60 +nagios_psql_db_size_w: 150000000 +nagios_psql_db_size_c: 170000000 + +nrpe_command_timeout: 420 +nrpe_include_dir: /etc/nagios/nrpe.d/ + +# Old stuff. 
To be deleted +nagios: + plugins_dir: '{{ nagios_plugdir }}' + centos_plugins_dir: '{{ nagios_centos_plugins_dir }}' + isti_plugdir: '{{ nagios_isti_plugdir }}' + common_lib: '{{ nagios_common_lib }}' + # Needed inside nrpe.cfg + monitoring_server_ip: '{{ nagios_monitoring_server_ip }}' + check_disk_w: '{{ nagios_check_disk_w }}' + check_disk_c: '{{ nagios_check_disk_c }}' + +nrpe: + command_timeout: '{{ nrpe_command_timeout }}' + include_dir: '{{ nrpe_include_dir }}' + diff --git a/nagios/files/check_dell_warranty.py b/nagios/files/check_dell_warranty.py new file mode 100644 index 00000000..a7be0da4 --- /dev/null +++ b/nagios/files/check_dell_warranty.py @@ -0,0 +1,752 @@ +#!/usr/bin/env python +''' +Nagios plug-in to pull the Dell service tag and check it +against Dell's web site to see how many days remain. By default it +issues a warning when there is less than thirty days remaining and critical +when there is less than ten days remaining. These values can be adjusted +using the command line, see --help. + + +Version: 4.1 +Created: 2009-02-12 +Author: Erinn Looney-Triggs +Revised: 2013-05-13 +Revised by: Erinn Looney-Triggs, Justin Ellison, Harald Jensas +https://gitorious.org/smarmy/check_dell_warranty/source/b6438fbef45ba22be3bf0aa2e0aa2e444a384813: +''' + +#============================================================================= +# TODO: omreport md enclosures, cap the threads, tests, more I suppose +# +# Revision history: +# 2013-05-13 4.1: Catch SSL exceptions from requests module. +# +# 2013-04-09 4.0: Moved to using api.dell.com and changed out urllib2 in +# preference to the requests library. +# +# 2012-10-08 3.0.2: Add support for hyphen dates +# +# 2012-10-07 3.0.1: Dell dropped the counter for days left from their site, +# this is now calculated internally. Add patch for European style dates +# with periods between that numbers. +# +# 2012-09-05 3.0: Use Net-SNMP bindings for python allowing SNMPv3 support. 
Add +# debugging output using -V, Small cleanups. +# +# 2012-08-23 2.2.3: Merge in patch from Colin Panisset to dedup serials before +# mutex is created +# +# 2012-07-30 2.2.2: Make regex slightly more robust on scrape. +# +# 2012-07-03 2.2.1: Fix version number mismatch, fix urllib exception catch, +# thanks go to Sven Odermatt for finding that. +# +# 2012-01-08 2.2.0: Fix to work with new website, had to add cookie handeling +# to prod the site correctly to allow scrapping of the information. +# +# 2010-07-19 2.1.2: Patch to again fix Dell's web page changes, thanks +# to Jim Browne http://blog.jbrowne.com/ as well as a patch to work against +# OM 5.3 +# +# 2010-04-13 2.1.1: Change to deal with Dell's change to their web site +# dropping the warranty extension field. +# +# 2009-12-17 2.1: Change format back to % to be compatible with python 2.4 +# and older. +# +# 2009-11-16 2.0: Fix formatting issues, change some variable names, fix +# a file open exception issue, Dell changed the interface so updated to +# work with that, new option --short for short output. +# +# 2009-08-07 1.9: Add smbios as a way to get the serial number. +# Move away from old string formatting to new string formatting. +# +# 2009-08-04 1.8: Improved the parsing of Dell's website, output is now much +# more complete (read larger) and includes all warranties. Thresholds are +# measured against the warranty with the greatest number of days remaining. +# This fixes the bug with doubled or even tripled warranty days being +# reported. +# +# 2009-07-24 1.7: SNMP support, DRAC - Remote Access Controller, CMC - +# Chassis Management Controller and MD/PV Disk Enclosure support. +# +# 2009-07-09 1.6: Threads! +# +# 2009-06-25 1.5: Changed optparse to handle multiple serial numbers. Changed +# the rest of the program to be able to handle multiple serial numbers. 
Added +# a de-duper for serial numbers just in case you get two of the same from +# the command line or as is the case with Dell blades, two of the same +# from omreport. So this ought to handle blades, though I don't have +# any to test against. +# +# 2009-06-05 1.4 Changed optparse to display %default in help output. Pretty +# up the help output with instead of variable names. Add description +# top optparse. Will now use prefer omreport to dmidecode for systems +# that have omreport installed and in $PATH. Note, that you do not have to be +# root to run omreport and get the service tag. +# +# 2009-05-29 1.3 Display output for all warranties for a system. Add up the +# number of days left to give an accurate count of the time remaining. Fix +# basic check for Dell's database being down. Fixed regex to be non-greedy. +# Start and end dates for warranty now takes all warranties into account. +# Date output is now yyyy-mm-dd because that is more international. +# +# 2009-05-28 1.2 Added service tag to output for nagios. Fixed some typos. +# Added command-line option for specifying a serial number. This gets +# rid of the sudo dependency as well as the newer python dependency +# allowing it to run on older RHEL distros. justin@techadvise.com +# +# 2009-05-27 1.1 Fixed string conversions to do int comparisons properly. +# Remove import csv as I am not using that yet. Add a license to the file. +# +# License: +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# +#============================================================================= + +import datetime +import logging +import os +import subprocess +import sys + +__author__ = 'Erinn Looney-Triggs' +__credits__ = ['Erinn Looney-Triggs', 'Justin Ellison', 'Harald Jensas' ] +__license__ = 'GPL 3.0' +__maintainer__ = 'Erinn Looney-Triggs' +__email__ = 'erinn.looneytriggs@gmail.com' +__version__ = '4.1' +__date__ = '2009-02-12' +__revised__ = '2013-05-13' +__status__ = 'Production' + +#Nagios exit codes in English +UNKNOWN = 3 +CRITICAL = 2 +WARNING = 1 +OK = 0 + +try: + import requests +except ImportError: + print ('Python Requests module (http://docs.python-requests.org/) ' + 'is required for this to work.') + sys.exit(CRITICAL) + +def extract_mtk_community(): + ''' + Get SNMP community string from /etc/mtk.conf + ''' + mtk_conf_file = '/etc/mtk.conf' + + logger.debug('Obtaining serial number via {0}.'.format(mtk_conf_file)) + + if os.path.isfile(mtk_conf_file): + try: + for line in open(mtk_conf_file, 'r'): + token = line.split('=') + + if token[0] == 'community_string': + community_string = token[1].strip() + except IOError: + print 'Unable to open {0}, exiting!'.format(mtk_conf_file) + sys.exit(UNKNOWN) + else: + print ('The {0} file does not exist, ' + 'exiting!').format(mtk_conf_file) + sys.exit(UNKNOWN) + + return community_string + +def extract_service_tag(): + '''Extracts the serial number from the localhost using (in order of + precedence) omreport, libsmbios, or dmidecode. This function takes + no arguments but expects omreport, libsmbios or dmidecode to exist + and also expects dmidecode to accept -s system-serial-number + (RHEL5 or later). 
+ + ''' + + dmidecode = which('dmidecode') + libsmbios = False + omreport = which('omreport') + service_tags = [] + + #Test for the libsmbios module + try: + logger.debug('Attempting to load libsmbios_c.') + import libsmbios_c + except ImportError: + logger.debug('Unable to load libsmbios_c continuing.') + pass + else: + libsmbios = True + + if omreport: + logger.debug('Obtaining serial number via OpenManage.') + import re + + try: + process = subprocess.Popen([omreport, "chassis", "info", + "-fmt", "xml"], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + except OSError: + print 'Error: {0} exiting!'.format(sys.exc_info) + sys.exit(WARNING) + + text = process.stdout.read() + pattern = '''(\S+)''' + regex = re.compile(pattern, re.X) + service_tags = regex.findall(text) + + elif libsmbios: + logger.debug('Obtaining serial number via libsmbios_c.') + + #You have to be root to extract the serial number via this method + if os.geteuid() != 0: + print ('{0} must be run as root in order to access ' + 'libsmbios, exiting!').format(sys.argv[0]) + sys.exit(WARNING) + + service_tags.append(libsmbios_c.system_info.get_service_tag()) + + elif dmidecode: + logger.debug('Obtaining serial number via dmidecode.') + #Gather the information from dmidecode + + sudo = which('sudo') + + if not sudo: + print 'Sudo is not available, exiting!' + sys.exit(WARNING) + + try: + process = subprocess.Popen([sudo, dmidecode, "-s", + "system-serial-number"], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + except OSError: + print 'Error: {0} exiting!'.format(sys.exc_info) + sys.exit(WARNING) + + service_tags.append(process.stdout.read().strip()) + + else: + print ('Omreport, libsmbios and dmidecode are not available in ' + '$PATH, exiting!') + sys.exit(WARNING) + + return service_tags + +def extract_service_tag_snmp( options ): + ''' + Extracts the serial number from the a remote host using SNMP. + This function takes the following arguments: hostname, community, + and mtk. 
The mtk argument will make the plug-in read the SNMP + community string from /etc/mtk.conf. (/etc/mtk.conf is used by + the mtk-nagios plugin. + (mtk-nagios plug-in: http://www.hpccommunity.org/sysmgmt/) + ''' + try: + import netsnmp + except ImportError: + print "Unable to load netsnmp python module, aborting!" + sys.exit(UNKNOWN) + + service_tags = [] + hostname = options.hostname + port = options.port + version = options.version + + logger.debug('Obtaining serial number via SNMP ' + 'version: {0}.'.format(version)) + + if version == 3: + sec_level = options.secLevel + sec_name = options.secName + priv_protocol = options.privProtocol + priv_password = options.privPassword + auth_protocol = options.authProtocol + auth_password = options.authPassword + + session = netsnmp.Session(DestHost=hostname, Version=version, + SecLevel=sec_level, SecName=sec_name, + AuthProto=auth_protocol, + AuthPass=auth_password, + PrivProto=priv_protocol, + PrivPass=priv_password, + RemotePort = port, + ) + + elif version == 2 or version == 1: + community = options.community + + session = netsnmp.Session(DestHost=hostname, Version=version, + Community=community, RemotePort=port) + + else: + print 'Unknown SNMP version {0}, exiting!'.format(version) + + + def _autodetect_dell_device(session): + + logger.debug('Beginning auto detection of system type.') + + var = netsnmp.VarList(netsnmp.Varbind('SNMPv2-SMI::enterprises', + '.674.')) + session.getnext(var) + tag = var.varbinds.pop().tag + + if tag.find('enterprises.674.10892.1.') != -1: + sys_type = 'omsa' #OMSA answered. + elif tag.find('enterprises.674.10892.2.') != -1: + sys_type = 'RAC' #Blade CMC or Server DRAC answered. + elif tag.find('enterprises.674.10895.') != -1: + sys_type = 'powerconnect' #PowerConnect switch answered. 
+ else: + print ('snmpgetnext Failed:{0} System type or system ' + 'unknown!').format(tag) + sys.exit(WARNING) + + logger.debug('System type is: {0}'.format(sys_type)) + + return sys_type + + system_type = _autodetect_dell_device(session) + + #System is server with OMSA, will check for External DAS enclosure + #and get service tag. + if system_type == 'omsa': + + #Is External DAS Storage Enclosure connected? + var = netsnmp.VarList(netsnmp.Varbind('SNMPv2-SMI::enterprises', + '.674.10893.1.20.130.3.1.1')) + enclosure_ids = session.walk(var) + + logger.debug('Enclosure IDs: {0}'.format(enclosure_ids)) + + for enclosure_id in enclosure_ids: + + #For backwards compatibility with OM 5.3 + if not enclosure_id: + continue + + var = netsnmp.VarList(netsnmp.Varbind('SNMPv2-SMI::enterprises', + '.674.10893.1.20.130.3.1.16.{0}'.format(enclosure_id))) + + enclosure_type = session.get(var)[0] + + logger.debug('Enclosure type: {0}'.format(enclosure_type)) + + if enclosure_type != '1': #Enclosure type 1 is integrated backplane. + + #Get storage enclosure Service Tag. + var = netsnmp.VarList(netsnmp.Varbind('SNMPv2-SMI::enterprises', + '.674.10893.1.20.130.3.1.8.{0}'.format(enclosure_id))) + enclosure_serial_number = session.get(var)[0] + + logger.debug('Enclosure Serial Number obtained: {0}' + .format(enclosure_serial_number)) + + service_tags.append(enclosure_serial_number) + + #Get system Service Tag. 
+ var = netsnmp.VarList(netsnmp.Varbind('SNMPv2-SMI::enterprises', + '.674.10892.1.300.10.1.11.1')) + + serial_number = session.get(var)[0] + + elif system_type == 'RAC': + var = netsnmp.VarList(netsnmp.Varbind('SNMPv2-SMI::enterprises', + '.674.10892.2.1.1.11.0')) + serial_number = session.get(var)[0] + + logger.debug('RAC serial number obtained: {0}'.format(serial_number)) + + elif system_type == 'powerconnect': + var = netsnmp.VarList(netsnmp.Varbind('SNMPv2-SMI::enterprises', + '.674.10895.3000.1.2.100' + '.8.1.4.1')) + serial_number = session.get(var)[0] + + logger.debug('PowerConnect serial number obtained: {0}' + .format(serial_number)) + + service_tags.append(serial_number) + + logger.debug('Service_tags obtained: {0}'.format(service_tags)) + + return service_tags + +# +# #Get enclosure type. +# # 1: Internal +# # 2: DellTM PowerVaultTM 200S (PowerVault 201S) +# # 3: Dell PowerVault 210S (PowerVault 211S) +# # 4: Dell PowerVault 220S (PowerVault 221S) +# # 5: Dell PowerVault 660F +# # 6: Dell PowerVault 224F +# # 7: Dell PowerVault 660F/PowerVault 224F +# # 8: Dell MD1000 +# # 9: Dell MD1120 + + +def get_warranty_https(service_tag_list, timeout): + ''' + Obtains the warranty information from Dell's website. This function + expects a list containing one or more serial numbers to be checked + against Dell's database. 
+ ''' + + url = 'https://api.dell.com/support/v2/assetinfo/warranty/tags.json' + #Additional API keys, just in case: + #d676cf6e1e0ceb8fd14e8cb69acd812d + #849e027f476027a394edd656eaef4842 + + apikey = '1adecee8a60444738f280aad1cd87d0e' + + service_tags = '' + + if len(service_tag_list) == 1: + service_tags = service_tag_list[0] + else: + for service_tag in service_tag_list: + service_tags += service_tag + '|' + + #Because we can't have a trailing '|' + service_tags = service_tags.rstrip('|') + + logger.debug('Requesting service tags: {0}'.format(service_tags)) + + payload = {'svctags': service_tags, 'apikey': apikey} + + try: + response = requests.get(url, params=payload, verify=False, + timeout=timeout) + + except requests.exceptions.SSLError: + print 'Unable to verify SSL certificate for url: {0}'.format(url) + sys.exit(UNKNOWN) + + try: + #Throw an exception for anything but 200 response code + response.raise_for_status() + except requests.exceptions.HTTPError: + print 'Unable to contact url: {0}.format(url)' + sys.exit(UNKNOWN) + + logger.debug('Requesting warranty information from Dell url: ' + '{0}'.format(response.url)) + + result = response.json() + logger.debug('Raw output received: \n {0}'.format(result)) + + #We test for any faults assserted by the api. + check_faults(result) + + return result + +def check_faults(response): + ''' + This function checks the json content for faults that are rasied by Dell's + API. Any faults results in immediate termination with status UNKNOWN. + ''' + + logger.debug('Testing for faults in json response.') + fault = (response['GetAssetWarrantyResponse']['GetAssetWarrantyResult'] + ['Faults']) + logger.debug('Raw fault return: {0}'.format(fault)) + + if fault: + logger.debug('Fault found.') + + code = fault['FaultException']['Code'] + message = fault['FaultException']['Message'] + + print ('API fault code: "{0}" encountered, message: "{1}". 
' + 'Exiting!'.format(code, message)) + sys.exit(UNKNOWN) + + logger.debug('No faults found.') + return None + +def build_warranty_line(warranty, full_line, days, short_output): + ''' + This function takes a warranty object and parses the salient information + out. It then calculates the number of days remaining in the warranty + period, and builds a line for Nagios outputting. + ''' + + logger.debug('Warranty contains') + + description = warranty['ServiceLevelDescription'] + end_date = warranty['EndDate'] + start_date = warranty['StartDate'] + provider = warranty['ServiceProvider'] + + logger.debug('Found: Start date: {0}, End Date: {1},Description: {2}, ' + 'Provider: {3}'.format(start_date, end_date, description, + provider)) + + #Because we need ot be able to calculate the time left as well as + #better formatting. + start_date = convert_date(start_date) + end_date = convert_date(end_date) + + days_left = (end_date - datetime.date.today()).days + + #Because no one cares about egative numbers of days. + if days_left < 0: + days_left = 0 + + logger.debug('Number of days left in warranty: ' + '{0}'.format(days_left)) + + if short_output: + full_line = "%s, End: %s, Days left: %i" %(full_line, str(end_date.strftime('%m/%d/%Y')), days_left) + + else: + full_line = "%s, Warranty: %s, Start: %s, End: %s, Days left: %i" %(full_line, description, str(start_date.strftime('%m/%d/%Y')), + str(end_date.strftime('%m/%d/%Y')), days_left) + + days.append(int(days_left)) + + return full_line, days + +def convert_date(date): + ''' + This function converts the date as returned by the Dell API into a + datetime object. 
Dell's API format is as follows: 2010-07-01T01:00:00 + ''' + #Split on 'T' grab the date then split it out on '-' + year, month, day = date.split('T')[0].split('-') + + return datetime.date(int(year), int(month), int(day)) + +def process_asset(asset, full_line, days, short_output): + ''' + This function processes a json asset returned from Dell's API and + builds a line appropriate for Nagios output, as well as the service + tag for the line and the number of days remaining for each warranty + as a list. + ''' + + logger.debug('Raw asset being processed: {0}'.format(asset)) + + service_tag = asset['ServiceTag'] + warranty = asset['Warranties']['Warranty'] + if ( type(warranty) == type([]) ) and len(warranty) > 0: + warranty = warranty[0] + full_line, days = build_warranty_line(warranty, full_line, + days, short_output) + + return service_tag, full_line, days + +def parse_exit(result, short_output): + + critical = 0 + days = [] + warning = 0 + full_line = r'%s: Service Tag: %s' + + logger.debug('Beginning to parse results and construct exit line ' + 'and code.') + + assets = (result['GetAssetWarrantyResponse']['GetAssetWarrantyResult'] + ['Response']['DellAsset']) + + logger.debug('Assets obtained: {0}'.format(assets)) + + #Check if there are multiple assets being provided + if isinstance(assets, list): + logger.debug('Multiple assets being processed.') + + for asset in assets: + service_tag, full_line, days = process_asset(asset, full_line, + days, short_output) + + #There is only one asset + else: + logger.debug('A single asset is being processed.') + asset = assets + service_tag, full_line, days = process_asset(asset, full_line, + days, short_output) + + #Put the days remaining in ascending order + days.sort() + + logger.debug('Days remaining on warranties: {0}'.format(days)) + + if days[-1] < options.critical_days: + state = 'CRITICAL' + critical += 1 + + elif days[-1] < options.warning_days: + state = 'WARNING' + warning += 1 + + else: + state = 'OK' + + print 
full_line % (state, service_tag), + + if critical: + sys.exit(CRITICAL) + elif warning: + sys.exit(WARNING) + else: + sys.exit(OK) + + return None #Should never get here + +def sigalarm_handler(signum, frame): + ''' + Handler for an alarm situation. + ''' + + print ('{0} timed out after {1} seconds, ' + 'signum:{2}, frame: {3}').format(sys.argv[0], options.timeout, + signum, frame) + + sys.exit(CRITICAL) + return None + +def which(program): + '''This is the equivalent of the 'which' BASH built-in with a check to + make sure the program that is found is executable. + ''' + + def is_exe(file_path): + '''Tests that a file exists and is executable. + ''' + return os.path.exists(file_path) and os.access(file_path, os.X_OK) + + file_path = os.path.split(program)[0] + + if file_path: + if is_exe(program): + return program + else: + for path in os.environ["PATH"].split(os.pathsep): + exe_file = os.path.join(path, program) + if is_exe(exe_file): + return exe_file + + return None + +if __name__ == '__main__': + import optparse + import signal + + parser = optparse.OptionParser(description='''Nagios plug-in to pull the +Dell service tag and check it against Dell's web site to see how many +days remain. By default it issues a warning when there is less than +thirty days remaining and critical when there is less than ten days +remaining. These values can be adjusted using the command line, see --help. +''', + prog="check_dell_warranty", + version="%prog Version: {0}".format(__version__)) + parser.add_option('-a', dest='authProtocol', action='store', + help=('Set the default authentication protocol for ' + 'SNMPv3 (MD5 or SHA).')) + parser.add_option('-A', dest='authPassword', + help=('Set the SNMPv3 authentication protocol password.') + ) + parser.add_option('-C', '--community', action='store', + dest='community', type='string',default='public', + help=('SNMP Community String to use. 
' + '(Default: %default)')) + parser.add_option('-c', '--critical', dest='critical_days', default=10, + help=('Number of days under which to return critical ' + '(Default: %default).'), type='int', metavar='') + parser.add_option('-H', '--hostname', action='store', type='string', + dest='hostname', + help='Specify the host name of the SNMP agent') + parser.add_option('-l', dest='secLevel', default='noAuthNoPriv', + action='store', + help=('Set the SNMPv3 security level, (noAuthNoPriv' + '|authNoPriv|authPriv) (Default: noAuthNoPriv)')) + parser.add_option('--mtk', action='store_true', dest='mtk_installed', + default=False, + help=('Get SNMP Community String from /etc/mtk.conf if ' + 'mtk-nagios plugin is installed. NOTE: This option ' + 'will make the mtk.conf community string take ' + 'precedence over anything entered at the ' + 'command line (Default: %default)')) + parser.add_option('-p', '--port', dest='port', default=161, + help=('Set the SNMP port to be connected to ' + '(Default:161).'), type='int') + parser.add_option('-s', '--service_tag', dest='service_tag', + help=('Dell Service Tag of system, to enter more than ' + 'one use multiple flags (Default: auto-detected)'), + action='append', metavar='') + parser.add_option('-S', '--short', dest='short_output', + action='store_true', default = False, + help=('Display short output: only the status, ' + 'service tag, end date and days left for each ' + 'warranty.')) + parser.add_option('-t', '--timeout', dest='timeout', default=10, + help=('Set the timeout for the program to run ' + '(Default: %default seconds)'), type='int', + metavar='') + parser.add_option('-u', dest='secName', action='store', + help='Set the SNMPv3 security name (user name).') + parser.add_option('-v', dest='version', default=3, action='store', + help=('Specify the SNMP version (1, 2, 3) Default: 3'), + type='int' + ) + parser.add_option('-V', dest='verbose', action='store_true', + default=False, help =('Give verbose output (Default: ' + 
'Off)') + ) + parser.add_option('-w', '--warning', dest='warning_days', default=30, + help=('Number of days under which to return a warning ' + '(Default: %default)'), type='int', metavar='' ) + parser.add_option('-x', dest='privProtocol', action='store', + help='Set the SNMPv3 privacy protocol (DES or AES).') + parser.add_option('-X', dest='privPassword', action='store', + help='Set the SNMPv3 privacy pass phrase.') + + (options, args) = parser.parse_args() + + ##Configure logging + logger = logging.getLogger("check_dell_warranty") + handler = logging.StreamHandler() + if options.verbose: + sys.stderr.write('Switching on debug mode.\n') + handler.setLevel(logging.DEBUG) + logger.setLevel(logging.DEBUG) + + ##Set the logging format, time, log level name, and the message + formatter = logging.Formatter('%(levelname)s - %(message)s') + handler.setFormatter(formatter) + + logger.addHandler(handler) + + signal.signal(signal.SIGALRM, sigalarm_handler) + signal.alarm(options.timeout) + + if options.service_tag: + SERVICE_TAGS = options.service_tag + elif options.hostname or options.mtk_installed: + SERVICE_TAGS = extract_service_tag_snmp(options) + else: + SERVICE_TAGS = extract_service_tag() + + RESULT = get_warranty_https(SERVICE_TAGS, options.timeout) + signal.alarm(0) + + parse_exit(RESULT, options.short_output) + diff --git a/nagios/files/check_linux_raid b/nagios/files/check_linux_raid new file mode 100755 index 00000000..ab982cd6 --- /dev/null +++ b/nagios/files/check_linux_raid @@ -0,0 +1,115 @@ +#!/usr/bin/perl -w + +# Copyright (c) 2002 ISOMEDIA, Inc. +# originally written by Steve Milton +# later updates by sean finney +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +# +# Usage: check_raid [raid-name] +# Example: check_raid md0 +# WARNING md0 status=[UUU_U], recovery=46.4%, finish=123.0min + +use strict; +use lib "/usr/lib/nagios/plugins"; +use utils qw(%ERRORS); + +# die with an error if we're not on Linux +if ($^O ne 'linux') { + print "This plugin only applicable on Linux.\n"; + exit $ERRORS{'UNKNOWN'}; +} + +sub max_state($$){ + my ($a, $b) = @_; + if ($a eq "CRITICAL" || $b eq "CRITICAL") { return "CRITICAL"; } + elsif ($a eq "WARNING" || $b eq "WARNING") { return "WARNING"; } + elsif ($a eq "OK" || $b eq "OK") { return "OK"; } + elsif ($a eq "UNKNOWN" || $b eq "UNKNOWN") { return "UNKNOWN"; } + elsif ($a eq "DEPENDENT" || $b eq "DEPENDENT") { return "DEPENDENT"; } + return "UNKNOWN"; +} + +my $nextdev; +if(defined $ARGV[0]) { $nextdev = shift; } +else { $nextdev = "md[0-9]+"; } + +my $code = "UNKNOWN"; +my $msg = ""; +my %status; +my %recovery; +my %finish; +my %active; +my %devices; + +while(defined $nextdev){ + open (MDSTAT, "< /proc/mdstat") or die "Failed to open /proc/mdstat"; + my $device = undef; + while() { + if (defined $device) { + if (/(\[[_U]+\])/) { + $status{$device} = $1; + } elsif (/recovery = (.*?)\s/) { + $recovery{$device} = $1; + ($finish{$device}) = /finish=(.*?min)/; + $device=undef; + } elsif (/^\s*$/) { + $device=undef; + } + } elsif (/^($nextdev)\s*:/) { + $device=$1; + $devices{$device}=$device; + if (/\sactive/) { + $status{$device} = ''; # Shall be filled later if available + $active{$device} = 1; + } + } + } + $nextdev = shift; +} + +foreach 
my $k (sort keys %devices){ + if (!exists($status{$k})) { + $msg .= sprintf " %s inactive with no status information.", + $devices{$k}; + $code = max_state($code, "CRITICAL"); + } elsif ($status{$k} =~ /_/) { + if (defined $recovery{$k}) { + $msg .= sprintf " %s status=%s, recovery=%s, finish=%s.", + $devices{$k}, $status{$k}, $recovery{$k}, $finish{$k}; + $code = max_state($code, "WARNING"); + } else { + $msg .= sprintf " %s status=%s.", $devices{$k}, $status{$k}; + $code = max_state($code, "CRITICAL"); + } + } elsif ($status{$k} =~ /U+/) { + $msg .= sprintf " %s status=%s.", $devices{$k}, $status{$k}; + $code = max_state($code, "OK"); + } else { + if ($active{$k}) { + $msg .= sprintf " %s active with no status information.", + $devices{$k}; + $code = max_state($code, "OK"); + } else { + # This should't run anymore, but is left as a catch-all + $msg .= sprintf " %s does not exist.\n", $devices{$k}; + $code = max_state($code, "CRITICAL"); + } + } +} + +print $code, $msg, "\n"; +exit ($ERRORS{$code}); + diff --git a/nagios/files/check_netint.pl b/nagios/files/check_netint.pl new file mode 100644 index 00000000..7f9fdaad --- /dev/null +++ b/nagios/files/check_netint.pl @@ -0,0 +1,2731 @@ +#!/usr/bin/perl -w +# +# =============================== SUMMARY ===================================== +# +# Program : check_netint.pl or check_snmp_netint.pl +# Version : 2.4 alpha 9 +# Date : Nov 30, 2012 +# Maintainer: William Leibzon - william@leibzon.org, +# Authors : See "CONTRIBUTORS" documentation section +# Licence : GPL - summary below, full text at http://www.fsf.org/licenses/gpl.txt +# +# ********************* IMPORTANT NOTE ABOUT THIS VERSION ******************** +# *** THIS IS AN ALPHA/DEVELOPMENT RELEASE WHICH HAS NOT BEEN FULLY TESTED *** +# *** IF YOU NEED A STABLE VERSION, PLEASE GET 2.36 VERSION OF THIS PLUGIN *** +# *** AT HTTP://william.leibzon.org/nagios/ or http://exchange.nagios.org/ *** +# 
**************************************************************************** +# +# =========================== PROGRAM LICENSE ================================= +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +# +# ===================== INFORMATION ABOUT THIS PLUGIN ========================= +# +# This is a plugin for nagios to check network interfaces (network ports) +# on servers switches & routers. It is based on check_snmp_int.pl plugin +# by Patrick Ploy with extensive rewrites for performance improvements +# and additions to support Cisco and other switches (plugin can query and +# cache cisco port names, port link data and switch STP status and more) +# The plugin can use nagios-stored previous performance data to give port +# traffic & utilization without creation of temporary files. This new 2.4 +# version supports checking interface on a local linux server without SNMP. 
+# +# ====================== SETUP AND PLUGIN USE NOTES ========================= +# +# Help : ./check_snmp_netint.pl -h +# above will tell you most you probalby need for this to make this plugin work +# +# Patrick's Site: http://nagios.manubulon.com/snmp_int.html +# documentation reproduced below for options shared with check_snmp_int +# +# If you're using -P option to pass performance data back to plugin then +# you may (depending on version of nagios) also need to modify nagios.cfg +# and remove ' from illegal_macro_output_chars=`~$&|'"<> line, i.e. change to +# illegal_macro_output_chars=`~$&|"<> +# +# ------------------------------------------------------------------------------ +# Checks by snmp (v1, v2c or v3) host interface state and usage. +# Interfaces can be selected by regexp ('eth' will check eth0,eth1,eth2, ...). +# If multiple interfaces are selected, all must be up to get an OK result +# +# Standard checks: +# The script will check interface operational status using the MIB-II table. +# To see how interface looks like in snmp, you can list all with the '-v'. +# +# The interfaces are selected by their description in the MIB-II table. +# The interface is/are selected by the -n option. This option will be treated +# as a regular expression (eth will match eth0,eth1,eth2...). You can disable +# this with the -r option : the interface will be selected if it's description +# exactly matches the name given by -n +# +# The script will return OK if ALL interfaces selected are UP, or CRITICAL +# if at least one interface is down. You can make the script return a OK +# value when all interfaces are down (and CRITICAL when at least one is up) +# with the -i option. You can make the same tests on administrative status +# instead with the -a option. If you have ISDN interface, and want that +# DORMANT state returns ok, put -D. +# +# To make output shorter, specially when you have many interfaces, you can put +# the -s option. 
It will get only the first characters of the interface +# description. If the number is negative then get the last characters. +# Ex : EL20005 3Com Gigabit NIC (3C2000 Family) +# -s 4 will output : "EL20". +# -s -4 will output : "ily)". +# +# Performance output +# -f option : performance output (default the In/out octet as a counter). +# -e option : in/out errors and discarded packets. -f must also be set. +# -S option : Include speed in performance output in bits/s as '_speed_bps' +# -y option : output performance data in % of interface speed +# -Y option : output performance data in bits/s or Bytes/s (depending on -B) +# Note : -y and -Y options need the usage check to ba active (-k) +# Warning : the counters needed by -e are not available on all devices +# +# Usage check +# -k : activates the standard usage feature +# -q : activates the extended usage +# -d : delta in seconds (default is 300s) +# -w : warning levels +# -c : critical levels +# +# If you specify '-k' a temporary file will be created in "/tmp" by default +# (unless -P option is also used, see below). Directory and start of filename +# can be set with '-F' option with result file being like +# tmp_Nagios_int.., one file per interface +# +# If you do "-k -P \$SERVICEPERFDATA\$ -T \$LASTSERVICECHECK\$" then no file +# is created and instead data from previous check is feed back into plugin. +# +# The status UNKNOWN is returned when the script doesn't have enough +# information (see -d option). You will have to specify the warning and +# critical levels, separated with "," and you can use decimal (ex : 10.3). 
+# +# For standard checks (no "-q") : +# -w , -c , +# In warn : warning level for incoming traffic +# Out warn : warning level for outgoing traffic +# In crit : critical level for incoming traffic +# Out crit : critical level for outgoing traffic +# +# Use 0 if you do not want to specify any warning or critical threshold +# You can also use '-z' option which makes specifying -w and -c optional +# but still allows to see all the values in status or use them for graphing +# +# The unit for the check depends on the -B, -M and -G option : +# B set -B not set +# ----------------+--------+------------- +# -M & -G not set | Kbps | KBps +# -M set | Mbps | MBps +# -G set | Gbps | GBps +# +# It is possible to put warning and critical levels with -b option. +# 0 means no warning or critical level checks +# +# When the extended checks are activated (-q option), the warning levels are +# -w ,,,,, +# -c ,, ..... +# In error : warn/crit level in inbound error/minute +# Out error : warn/crit level in outbound error/minute +# In disc : warn/crit level in inbound discarded packets/minute +# Out disc : warn/crit level in outbound discarded packets/minute +# +# -d: delta time +# You can put the delta time as an option : the "delta" is the preferred time +# between two values that the script will use to calculate the average +# Kbytes/s or error/min. The delta time should (not must) be bigger than +# the check interval. +# Here is an example : Check interval of 2 minutes and delta of 4min +# T0 : value 1 : can't calculate usage +# T0+2 : value 2 : can't calculate usage +# T0+4 : value 3 : usage=(value3-value1)/((T0+4)-T0) +# T0+6 : value 4 : usage=(value4-value2)/((T0+6)-T0+2) +# (Yes I know TO+4-T0=4, it's just to explain..) +# The script will allow 10% less of the delta and 300% more than delta +# as a correct interval. For example, with a delta of 5 minutes, the +# acceptable interval will be between 4'30" and 15 minutes. 
+# +# Msg size option (-o option) +# In case you get a "ERROR: running table: Message size exceeded maxMsgSize" +# error, you may need to adjust the maxMsgSize, i.e. the maximum size of +# snmp message with the -o option. Try a value with -o AND the -v option, +# the script will output the actual value so you can add some octets to it +# with the -o option. +# +# --label option +# This option will put label before performance data value: +# Without : eth1:UP (10.3Kbps/4.4Kbps), eth0:UP (10.9Kbps/16.4Kbps):2 UP: OK +# With : eth1:UP (in=14.4Kbps/out=6.2Kbps), eth0:UP (in=15.3Kbps/out=22.9Kbps):2 UP: OK +# +# Note: Do not rely on this option meaning same thing in the future, it may be +# changed to specify label to put prior to plugin output with this +# option changing to something else... +# +# ----------------------------------------------------------------------------- +# Below is documentation for options & features unique to check_snmp_netint +# that were not part of check_snmp_int: +# +# I. Plugin execution time and performance and optimization options: +# 1. [default] By default this plugin will first read full +# 'ifindex description' table and from that determine which interfaces +# are to be checked by doing regex with name(s) given at -n. It will +# then issue several SNMP queries - one for operational or admin status +# and 2nd one for "performance" data. If '--cisco' and '--stp' options +# are given then several more queries are done to find mapping from +# cisco ports to ifindex and separate mapping between ifindex and stp, +# only then can queries be done for additional cisco & stp status data. +# 2. ['minimize_queries'] If you use '-m' ('--minimize_queries') option then +# all queries for status data are done together but the original ifindex +# snmp table is still read separately. By itself this brings about 30% +# speed improvement +# 3. 
['minimize_queries' and -P] When using '-f -m -P "$SERVICEPERFDATA$"' +# as options, your nagios config performance data is fed back and +# used as a placeholder to cache information on exactly which +# interfaces need to be queried. So no additional description table +# lookup is necessary. Similarly data for '--cisco' and '--stp' +# maps is also cached and reused. There is only one SNMP query done +# together for all data OIDs (status & performance data) and +# for all interfaces; this query also includes query for specific +# description OID (but not reading entire table) which is then +# compared against cached result to make sure ifindex has not changed. +# Once every 12 hours full check is done and description data is recached. +# 65% to 90% or more speed improvements are common with this option. +# 4. ['minimum_queries' and -P] Using '-f -mm -P "$SERVICEPERFDATA$"' +# is almost the same as "-m" but here extra check that interface +# description is still the same is not done and recaching is every +# 3 days instead of 12 hours. Additionally port speed data is also +# cached and not checked every time. These provide marginal extra +# plugin execution time improvements over '-m' (75%-100% improvement +# over not doing -m) but is not safe for devices where port ifindex +# may change (i.e. switches with removable interface modules). +# But in 99% of the cases it should be ok to use this option. +# +# II. As mentioned previously when you want to see current traffic in/out & +# utilization data (-k option) for interface this requires previous octet +# count data to calculate average and so normally this requires temporary +# file (see -F option). 
But when you feed nagios performance data back to +# plugin as per above that means you already provide with at least one set +# of previous data, so by also adding '-T $LASTSERVICECHECK$' (which is time +# of last check when this data was cached) you can have this plugin report +# current traffic in Mb (or kb, etc) without any temporary files. +# +# As of version 2.1 its possible to also have short history as part of +# performance data output i.e. plugin will output not only the +# most current data but also one or more sets of previous data. +# Bandwidth calculations are then less "bursty". Total number of such +# sets is controlled with '--pcount' option and by default is 2. +# If you have only one interface checked with this plugin its probably +# safe to increase this to 3 or 4, but larger values or more interfaces +# are an issue unless you increased size of nagios buffer used to +# store performance data. +# +# III.For those of you with Cisco switches you may have noticed that they +# do not provide appropriate port names at standard SNMP ifdescr table. +# There are two options to help you: +# 1. If you set custom port names ('set port 1/xx name zzz") you can use +# those names with "--cisco=use_portnames" option. +# 2. Another option is specify custom description table with +# "-N 1.3.6.1.2.1.31.1.1.1.1" +# and optionally display "set port name" as a comment. +# Its recommended you try both: +# "-N 1.3.6.1.2.1.31.1.1.1.1 --cisco=show_portnames" and +# "-O 1.3.6.1.2.1.31.1.1.1.1 --cisco=use_portnames" +# and see which works best in your case +# +# Additionally when using "--cisco" option the plugin will attempt to +# retrieve port status information from 3 cisco-specific tables (see below). +# If any "unusual" status is listed there the output is provided back - this +# can be useful to diagnose if you have faulty cable or if the equipment +# on the other end is bad, etc. 
The tables retrieved are: +# --cisco=oper portOperStatus = 1.3.6.1.4.1.9.5.1.4.1.1.6 +# --cisco=linkfault portLinkFaultStatus = 1.3.6.1.4.1.9.5.1.4.1.1.22 +# --cisco=addoper portAdditionalOperStatus = 1.3.6.1.4.1.9.5.1.4.1.1.23 +# --cisco=noauto special option - none of the above +# You can mix-match more then one table (together with show_portnames) or not +# specify at all (i.e. just '--cisco') in which case plugin will attempt to +# retrieve data from all 3 tables first time (stop with '--cisco=noauto') +# but if you use caching (-m) it will output and cache which table actually +# had usable data and will not attempt to retrieve from tables that did +# not exist on subsequent calls. +# +# IV. Support is also provided to query STP (Spanning Tree Protocol) status +# of the port. Although only tested with cisco switches, this is +# standartized SNMP data and should be supported by few other vendors +# so separate '--stp' option will work without '--cisco' option. +# The plugin will report single WARNING alert if status changes so +# be prepared for some alerts if your network is undergoing reorganization +# due to some other switch getting unplugged. Otherwise STP status is also +# very useful diagnostic data if you're looking as to why no traffic is +# passing through particular interface... +# +# ============================ EXAMPLES ======================================= +# +# First set of examples is from Patrick's site: +# +# check_snmp_netint using snmpv1: +# define command{ +# command_name check_snmp_int_v1 +# command_line $USER1$/check_snmp_netint.pl -H $HOSTADDRESS$ $USER7$ -n $ARG1$ $ARG2$ +# } +# Checks FastEthernet 1 to 6 are up (snmpv1): +# define service { +# name check_int_1_6 +# check_command check_snmp_int_v1!"FastEthernet-[1-6]" +# } +# Checks input bandwith on eth1 is < 100 KBytes/s and output is < 50 Kbytes/s +# (critical at 0,0 means no critical levels). 
(snmpv3): +# define service { +# name check_int_eth0_bdw +# check_command check_snmp_int_v3!eth0!-k -w 100,50 -c 0,0 +# } +# ---------------------------------------------------------------- +# +# Linux server with one or more eth? and one or more bond? interface: +# define command { +# command_name check_snmp_network_interface_linux +# command_line $USER1$/check_snmp_int.pl -2 -f -e -C $USER6$ -H $HOSTADDRESS$ +# -n $ARG1$ -w $ARG2$ -c $ARG3$ -d 200 -q -k -y -M -B +# -m -P "$SERVICEPERFDATA$" -T "$LASTSERVICECHECK$" +# } +# define service{ +# use std-service +# servicegroups snmp,netstatistics +# hostgroup_name linux +# service_description Network Interfaces +# check_command check_snmp_network_interface_linux!"eth|bond"!50,50,0,0,0,0!100,100,0,0,0,0 +# } +# +# Alteon switch - really funky device that does not like snmp v2 queries +# (so no -2) and no good interface names table. Therefore normal ifindex +# is used instead with index->names translation somewhat "guessed" manually +# with snmpwalk based on data (for those who want to know more, the first +# 255 ids are reserved for VLANs): +# define command { +# command_name check_snmp_network_interface_alteon +# command_line $USER1$/check_snmp_netint.pl -f -C $USER5$ -H $HOSTADDRESS$ +# -N 1.3.6.1.2.1.2.2.1.1 -n $ARG1$ -w $ARG2$ -c $ARG3$ -d 200 -k -y +# -M -B -m -P "$SERVICEPERFDATA$" -T "$LASTSERVICECHECK$" +# } +# define service{ +# use std-switch-service +# servicegroups snmp,netstatistics +# hostgroup_name alteon184 +# service_description Alteon Gigabit Port 1 +# check_command check_snmp_network_interface_alteon!"257"!0,0!0,0 +# } +# +# Cisco CatOS switch (will work for 5500 and many others), full set of possible options is given: +# define command { +# command_name check_snmp_network_interface_catos +# command_line $USER1$/check_snmp_netint.pl -2 -f -C $USER5$ +# -H $HOSTADDRESS$ -N 1.3.6.1.2.1.31.1.1.1.1 --cisco=show_portnames --stp +# -n $ARG1$ -w $ARG2$ -c $ARG3$ -d 200 -e -q -k -y -M -B -mm +# -P 
"$SERVICEPERFDATA$" -T "$LASTSERVICECHECK$" +# } +# define service{ +# use std-switch-service +# servicegroups snmp,netstatistics +# hostgroup_name cs2948 +# service_description GigabitEthernet2/1 +# check_command check_snmp_network_interface_catos!"2/1$"!0,0,0,0,0,0!0,0,0,0,0,0 +# } +# +# Cisco 2960 (IOS) switch (has only portOperStatus extended port state table): +# define command { +# command_name check_snmp_network_interface_cisco2960 +# command_line $USER1$/check_snmp_netint.pl -2 -f -C $USER5$ +# -H $HOSTADDRESS$ --cisco=oper,show_portnames --stp -n $ARG1$ -w $ARG2$ +# -c $ARG3$ -d $USER8$ -e -q -k -y -M -B -mm -P "$SERVICEPERFDATA$" +# -T "$LASTSERVICECHECK$" --label +# } +# define service{ +# use std-switch-service +# servicegroups snmp,netstatistics +# hostgroup_name cs2960 +# service_description GigabitEthernet0/1 +# check_command check_snmp_network_interface_cisco2960!"GigabitEthernet0/1$"!0,0,0,0,0,0!0,0,0,0,0,0 +# } +# +# Other ports on above switches are defined similarly as separate services - +# you don't have to do it this way though, but all 48 ports is too much for +# one check to handle so if you have that many split checks into groups of +# no more then 12 ports +# +# ======================= VERSIONS and CHANGE HISTORY ========================= +# +# [1.4] This plugin is based on (with now about 60% rewrite or new code) +# release 1.4 (somewhere around May 2007) of the check_snmp_int +# plugin by Patrick Ploy. This is info provided with 1.4 version: +# ---------------------------------------------------------- +# Version : 1.4.1 +# Date : Jul 9 2006 +# Author : Patrick Proy ( patrick at proy.org ) +# Help : http://www.manubulon.com/nagios/ +# Licence : GPL - http://www.fsf.org/licenses/gpl.txt +# Contrib : J. Jungmann, S. Probst, R. Leroy, M. 
Berger +# TODO : +# Check isdn "dormant" state +# Maybe put base directory for performance as an option +# ---------------------------------------------------------- +# +# The first changes for performance improvements were started in around +# October 2006 with code base at version 1.4.1 of Patrick's check_snmp_int +# plugin. Patricks's latest code from about May 2007 was ported back into +# code maintained by WL (exact 1.4.x version of this port is unclear). +# Those early performance improvement code changes are now invoked with +# 'minimize_queries' (but without -P) option and allow to do query +# for status data for all ports together. Additionally -N option to +# specify different port names table OID was added in 2006 as well. +# Also -F option from above TODO was added too. +# +# [1.5] 06/01/07 - Main code changes by William to allow the plugin to reuse +# its previous performance data (passed with $SERVICEPERFDATA$ macro). +# The changes were extensive and allow to reuse this data in way similar +# to maintaining history file and result in traffic rate (per Mb/Gb etc) +# being reported in the output. Additionally of paramout importance was +# saving list of ports to check (i.e. result of regex) which means that +# port/interface names table do not need to be checked with SNMP every +# time and instead specific ports OIDs can be retrieved with bulk request +# (this is what results in up to 75% performance improvement). +# About 30-40% of the original code was rewritten for these changes and +# '--minimize_queries' (or '-m') option added - back then it acted more +# like '--minimum_queries' or '-mm' in 2.0 release +# [1.5.5] 07/15/07 - Code additions to support cisco-specific data given +# with '--cisco' option. Support is both for using cisco port names +# for regex matching of ports and for using different table for regex +# matching but adding cisco port name as additional comment/description. 
+# +# Also cisco-specific port status data (info on if cable is attached, +# etc) are also retrieved & added as additional commentary to port +# UP/DOWN status. Additional coding work on performance improvements +# was also done somewhere between June and July 2007 which in part resulted +# in separation of "--minimize_queries" and "--minimum_queries" options. +# [1.5.7] 07/22/07 - This is when code to support retrieval of STP data +# and '--stp' option were added. Also some more code cleanup related +# to 1.5.x to better support cisco switches. +# +# A code from locally maintained but never released to public 1.5.7 +# branch was sent by William to Patrick sometime in early August 2007. +# He briefly responded back that he'll look at it later but never +# responded further and did not incorporate this code into his main +# branch releasing newer version of 1.4.x. As a result since there is +# public benefit in new code due to both performance and cisco-specific +# improvements, this will now be released as new plugin 'check_snmp_netint' +# with branch version starting at 2.0. The code will be maintained +# by William unless Patrick wants to merge it into his branch later. +# There is about 50% code differences (plugin header documentation you're +# reading are not counted) between this release and check_snmp_int 1.4 +# which is where this plugin started from. +# +# [2.0] 12/20/07 - First public release as check_snmp_netint plugin. Primary +# changes from 1.5.7 are the "header" with history and documentation +# which are necessary for such public release, copyright notice changed +# (W. Leibzon was listed only as contributor before), etc. +# +# [2.1] 12/26/07 - Support for more than one set of previous data in +# performance output to create short history for better bandwidth +# check results. New option '--pcount' controls how many sets. 
+# 12/27/07 - Finally looked deeper into code that calculates bandwidth +# and speed data and saw that it was really only using one result and +# not some form or average. I rewrote that and it will now report back +# average from multiple successful consequitive checks which should give +# much smoother results. It also means that --pcount option will now +# be used to specify how many sets of data will be used for average +# even if temporary file is used to store results. +# 01/08/08 - Bug fixes in new algorithm +# [2.15] 01/12/08 - Fixed so that port speed table is not retrieved unless +# options -Y or -u or -S are given. Also fixed to make sure portpseed +# performance variable is only reported when '-S' option is given +# (however for caching speed data is also in 'cache_int_speed') +# [2.16] 02/03/08 - Bug fixed in how timestamp array is saved by new algorithm, +# it would have resulted in only up to 2 previous data being used properly +# even if > 2 are actually available +# [2.17] 04/02/08 - Bug fixes related to STP and Cisco port data extensions for +# cases when no data is returned for some or all of the ports +# [2.18] 04/03/08 - Rewrite of cisco additional port status data extensions. +# Now 3 tables: portOperStatus=1.3.6.1.4.1.9.5.1.4.1.1.6 +# portLinkFaultStatus = 1.3.6.1.4.1.9.5.1.4.1.1.22 +# portAdditionalOperStatus = 1.3.6.1.4.1.9.5.1.4.1.1.23 +# are supported but user can specify as option to --cisco= which one +# is to be retrieved. When its not specified the plugin defaults to +# "auto" mode (unless --cisco=noauto is used) and will try to retrieve +# data for all 3 tables, check which data is available and then +# cache these results and in the future only retrieve tables that +# returned some data. This behavior should work with all cisco switches +# and not only with cisco catos models. But be warned about bugs in +# complex behavior such as this... 
+# [2.19] 04/06/08 - For STP port changes previous state is now reported in +# the output (instead of just saying STP changed) +# +# [2.20] 04/10/08 - Releasing 2.2 version as stable. No code changes but +# documentation above has been updated +# [2.201] 04/15/08 - Minor results text info issue (',' was not added before operstatus) +# [2.21] 06/10/08 - Minor fixes. Some documentation cleanup. +# Option -S extended to allow specifying expected interface +# speed with critical alert if speed is not what is specified +# [2.22] 10/20/08 - Added support for "-D" option (dormant state of ISDN) +# [2.23] 10/22/08 - Code fixes submitted or suggested by Bryan Leaman: +# - Fix to write data to new file, for those using file +# (instead of perfdata MACRO) for previous data +# - _out_bps fixes for those who want to use that directly +# for graphing instead of octet counter +# - New option '-Z' to have octet count in performance data +# instead of having this data by default (though this data +# is required and added automaticly with -P option) +# +# [2.3] 12/15/10 - Various small fixes. Plus a patch sent by Tristan Horn to better +# support minimum and maximum warning and critical thresholds +# [2.31] 01/10/11 - Bug fix when reporting in_prct/out_prct performance metric +# [2.32] 12/22/11 - Fixes for bugs reported by Joe Trungale and Nicolas Parpandet +# Updates to check on existance of utils.pm and use but not require it +# Patch by Steve Hanselman that adds "-I" option: +# "I’ve attached a patch that adds an option to ignore interface status +# (this is useful when you’re monitoring a switch with user devices +# attached that randomly power on and off but you still want +# performance stats and alerts on bandwidth when in use)." +# [2.34] 12/25/11 - Based on comments/requests on nagiosexchange, -z option has been added. +# This option makes specifying thresholds with -w and/or -c optional +# for those who want to use plugin primarily for data collection +# and graphing. 
This was (and still can be) accomplished before by +# specifying threshold value as 0 and then its not checked. Also the +# use of -w and -c is unnecessary if you do not use -k or -q options. +# [2.35] 04/19/12 - Added patch by Sébastien PRUD'HOMME which incorporates changes +# and bug fixes in revsions 1.23 and 1.19 of check_snmp_int (done +# after this plugin deverged from it) into this plugin as well. +# The changes add proper support for 64-bit counters when -g +# option is used and fix a bug when output is in % / perf in Bytes. +# [2.36] 06/15/12 - 1) Added fixes suggested in modified version of this plugin created +# by Yannick Charton to remove ^@ (NULL ?) and other not-ASCII +# characters at the end of the interface description. This allows +# to correctly match the network interface on some Windows servers. +# 2) Extended '-v' (debug/verbose) option so that instead of writing +# to STDOUT people could specify a file to write debug output to. +# 3) Using of quotewords() in prev_perf as suggested by Nicholas Scott +# allows to work with interfaces that have space in their name. +# Due to this plugin now require Text::ParseWords perl library. +# 4) List of contributors created as a separate header section below. +# +# [2.4] alpha/beta This version will support getting data from network interfaces on +# the local machine rather than just by SNMP. It will also support +# gettind data on average traffic (50-percentalile) and being able +# to specify threshold based on deviation from this average. There +# will be several alpha and beta versions before official 2.4 release. +# +# Specific info on each alpha/beta release will be removed in the future and only +# summary of below will appear in documentation on features and updated in 2.4 release +# +# 2.4a1 - 07/07/12 - Implemented in this release: +# 1) The plugin has been renamed "check_netint" from "check_snmp_netint". 
+# It can now check interfaces on a local Linux system and in the +# future FreeBSD, Sun and other systems maybe supported too. +# Checking of local interfaces happens if you do not specify -H and +# snmp (-O, -C, etc) options and if plugin name does not start with +# "check_snmp". For local interfaces options like --stp and --cisco +# are not valid, and optimization options -m and -mm also do not +# work same way since plugin will always get all data locally. +# 2) The plugin no longer requires -n (interface name) option +# though it is still recommended you use it. When interface +# name is not specified, all interfaces will be checked. +# 3) If with -F option directory is given instead of a file this +# will become base directory to write temporary file to. +# 4) Many doc and code fixes and cleanups all over +# 2.4a2 - 08/15/12 - Fixed bug with cache of previous data for SNMP queries that came +# around due to change in logic and introduction of non-SNMP. +# Added experimental support for future Nagios SAVEDDATA feature +# (plugin output after || after perfdata) enabled with --nagios_with_saveddata +# 2.4a3 - 09/13/12 - Patch/contrib by Franky Van Liedekerke: +# 1) Added option --admindown_ok: when checking for operational UP +# interfaces, the interfaces that are administratively down are OK +# 2) The '-z' option now also prevents tmp files of being written +# and removes the 'no usable data' warning because of missing perf data +# 2.4a4 - 09/28/12 - Additional patch by FVL for --admindown_ok. Option also got -K one-letter alias. +# Also added determining interface speed on linux with ethtool and iwconfig.. +# 2.4a5 - 10/10/12 - Interface speed can now be specified with -S/--intspeed option for use when +# plugin can not find speed by itsef or when what it finds is wrong. 
Previously +# this option was used to warn if speed is not what is expected, To do this now +# requires using prefix WARNING<> or CRITICAL<> before actual speed which is +# an incompatible change to preious format of this option. +# 2.4a6 - 11/17/12 - Changed ok interval from 0.9*delta - 3*delta to 0.75*delta - 4*delta. +# Fixed bug that would not output interface speed percent data in +# perf unless both -y and -u were used together. This bug was introduced +# somewhere around 2.2 and apparently 2.31 did not entirely fix it +# 2.4a7 - 11/18/12 - Added support for SNMP bulk requests and --bulk_snmp_queries option +# 2.4a8 - 11/21/12 - Another major code refactoring work to separate snmp-specific query +# code into its own function (as well as new ifconfig processing +# for linux local checks into its own function). +# 2.4a9 - 11/30/12 - prev_perf() function added in place of directly accessing prev_perf hash +# message size is reset to 5 times the default with --bulk_snmp_queries +# +# ============================ LIST OF CONTRIBUTORS =============================== +# +# The following individuals have contributed code, patches, bug fixes and ideas to +# this plugin (listed in last-name alphabetical order): +# +# M. Berger +# Yannick Charton +# Steve Hanselman +# Tristan Horn +# J. Jungmann +# Bryan Leaman +# William Leibzon +# R. Leroy +# S. Probst +# Patrick Proy +# Sébastien PRUD'HOMME +# Nicholas Scott +# Franky Van Liedekerke +# +# Open source community is grateful for all your contributions. 
+# +# ============================ START OF PROGRAM CODE ============================= + +use strict; +use Getopt::Long; +use Text::ParseWords; + +# Nagios specific +use lib "/usr/lib/nagios/plugins"; +our $TIMEOUT; +our %ERRORS; +eval 'use utils qw(%ERRORS $TIMEOUT)'; +if ($@) { + $TIMEOUT = 10; + %ERRORS = ('OK'=>0,'WARNING'=>1,'CRITICAL'=>2,'UNKNOWN'=>3,'DEPENDENT'=>4); +} + +our $do_snmp=1; +eval 'use Net::SNMP'; +if ($@) { + $do_snmp=0; +} + +# Version +my $Version='2.4'; + +############### BASE DIRECTORY FOR TEMP FILE (override this with -F) ######## +my $o_base_dir="/tmp/tmp_Nagios_int."; +my $file_history=200; # number of lines of data to keep in file + +# SNMP OID Datas +my $inter_table= '.1.3.6.1.2.1.2.2.1'; +my $index_table = '1.3.6.1.2.1.2.2.1.1'; +my $descr_table = '1.3.6.1.2.1.2.2.1.2'; +my $oper_table = '1.3.6.1.2.1.2.2.1.8.'; +my $admin_table = '1.3.6.1.2.1.2.2.1.7.'; +my $speed_table = '1.3.6.1.2.1.2.2.1.5.'; +my $speed_table_64 = '1.3.6.1.2.1.31.1.1.1.15.'; +my $in_octet_table = '1.3.6.1.2.1.2.2.1.10.'; +my $in_octet_table_64 = '1.3.6.1.2.1.31.1.1.1.6.'; +my $in_error_table = '1.3.6.1.2.1.2.2.1.14.'; +my $in_discard_table = '1.3.6.1.2.1.2.2.1.13.'; +my $out_octet_table = '1.3.6.1.2.1.2.2.1.16.'; +my $out_octet_table_64 = '1.3.6.1.2.1.31.1.1.1.10.'; +my $out_error_table = '1.3.6.1.2.1.2.2.1.20.'; +my $out_discard_table = '1.3.6.1.2.1.2.2.1.19.'; + +# WL: I reversed hash array meaning so that human-redable name can be used to assign status instead +# of numeric value. 
Second array is used for reversing it back when doing status line output +my %status=('UP'=>1,'DOWN'=>2,'TESTING'=>3,'UNKNOWN'=>4,'DORMANT'=>5,'NotPresent'=>6,'lowerLayerDown'=>7); +my %status_print=(1=>'UP',2=>'DOWN',3=>'TESTING',4=>'UNKNOWN',5=>'DORMANT',6=>'NotPresent',7=>'lowerLayerDown'); + +# WL: These are in use for Cisco CATOS special hacks, enable use with "--cisco" +my $cisco_port_name_table='1.3.6.1.4.1.9.5.1.4.1.1.4'; # table of port names (the ones you set with 'set port name') +my $cisco_port_ifindex_map='1.3.6.1.4.1.9.5.1.4.1.1.11'; # map from cisco port table to normal SNMP ifindex table +my $cisco_port_linkfaultstatus_table='1.3.6.1.4.1.9.5.1.4.1.1.22.'; # see table below for possible codes +my $cisco_port_operstatus_table='1.3.6.1.4.1.9.5.1.4.1.1.6.' ;; # see table below for possible values +my $cisco_port_addoperstatus_table='1.3.6.1.4.1.9.5.1.4.1.1.23.'; # see table below for possible codes +# codes are as of July 2007 (just in case cisco updates MIB and somebody is working with this plugin later) +my %cisco_port_linkfaultstatus=(1=>'UP',2=>'nearEndFault',3=>'nearEndConfigFail',4=>'farEndDisable',5=>'farEndFault',6=>'farEndConfigFail',7=>'otherFailure'); +my %cisco_port_operstatus=(0=>'operstatus:unknown',1=>'operstatus:other',2=>'operstatus:ok',3=>'operstatus:minorFault',4=>'operstatus:majorFault'); +my %cisco_port_addoperstatus=(0=>'other',1=>'connected',2=>'standby',3=>'faulty',4=>'notConnected',5=>'inactive',6=>'shutdown',7=>'dripDis',8=>'disable',9=>'monitor',10=>'errdisable',11=>'linkFaulty',12=>'onHook',13=>'offHook',14=>'reflector'); + +# STP Information (only tested with Cisco but should work with other vendor switches too) +my $stp_dot1dbase_ifindex_map='1.3.6.1.2.1.17.1.4.1.2'; # map from dot1base port table to SNMP ifindex table +my $stp_dot1dbase_portstate='1.3.6.1.2.1.17.2.15.1.3.'; # stp port states +my %stp_portstate=('0'=>'unknown',1=>'disabled',2=>'blocking',3=>'listening',4=>'learning',5=>'forwarding',6=>'broken'); +my 
%stp_portstate_reverse=(); # becomes reverse of above, i.e. 'disabled'=>1, etc + +# Standard options +my $o_host = undef; # hostname +my $o_timeout= undef; # Timeout (Default 10) +my $o_descr = undef; # description filter +my $o_help= undef; # wan't some help ? +my $o_admin= undef; # admin status instead of oper +my $o_inverse= undef; # Critical when up +my $o_ignorestatus= undef; # Ignore interface NAK status, always report OK +my $o_dormant= undef; # Dormant state is OK +my $o_verb= undef; # verbose mode/debug file name +my $o_version= undef; # print version +my $o_noreg= undef; # Do not use Regexp for name +my $o_short= undef; # set maximum of n chars to be displayed +my $o_label= undef; # add label before speed (in, out, etc...). +my $o_admindown_ok= undef; # admin down is ok (usefull when checking operational status) + +# Speed/error checks +my $o_warn_opt= undef; # warning options +my $o_crit_opt= undef; # critical options +my @o_warn_min= undef; # warning levels of perfcheck +my @o_warn_max= undef; # warning levels of perfcheck +my @o_crit_min= undef; # critical levels of perfcheck +my @o_crit_max= undef; # critical levels of perfcheck +my $o_checkperf= undef; # checks in/out/err/disc values +my $o_delta= 300; # delta of time of perfcheck (default 5min) +my $o_ext_checkperf= undef; # extended perf checks (+error+discard) +my $o_highperf= undef; # Use 64 bits counters +my $o_meg= undef; # output in MBytes or Mbits (-M) +my $o_gig= undef; # output in GBytes or Gbits (-G) +my $o_prct= undef; # output in % of max speed (-u) +my $o_kbits= undef; # Warn and critical in Kbits instead of KBytes +my $o_zerothresholds= undef; # If warn/crit are not specified, assume its 0 + +# Average Traffic Calculation Options (new option for upcoming 2.4 beta) +my $o_timeavg_opt= undef; # New option that allows to keep track of average traffic + # (50 percentile) over longer period and to set + # threshold based on deviation from this average +my $o_atime_nchecks_opt= undef; # 
Minimum number of samples for threshold to take affect + # (2 numbers: one fo take affect in addition to regular + # threshold, 2nd number is to take + +# Performance data options +my $o_perf= undef; # Output performance data +my $o_perfe= undef; # Output discard/error also in perf data +my $o_perfp= undef; # output performance data in % of max speed (-y) +my $o_perfr= undef; # output performance data in bits/s or Bytes/s (-Y) +my $o_perfo= undef; # output performance data in octets (-Z) +my $o_intspeed= undef; # include speed in performance output (-S), specify speed + +# WL: These are for previous performance data that nagios can send data to the plugin +# with $SERVICEPERFDATA$ macro (and $SERVICESAVEDDATA$ for future naios versions). +# This allows to calculate traffic without temporary file and also used to cache +# SNMP table info so as not to retreive it every time +my $o_prevperf= undef; # performance data given with $SERVICEPERFDATA$ macro +my $o_prevtime= undef; # previous time plugin was run $LASTSERVICECHECK$ macro +my @o_minsnmp= (); # see below +my $o_minsnmp= undef; # minimize number of snmp queries +my $o_maxminsnmp= undef; # minimize number of snmp queries even futher (slightly less safe in case of switch config changes) +my $o_bulksnmp= undef; # do snmp bulk request +my $o_filestore= ""; # path of the file to store cached data in - overrides $o_base_dir +my $o_pcount= 2; # how many sets of previous data should be in performance data +my $o_nagios_saveddata= undef; # enabled SAVEDDATA special output after || + +# These are unrelated WL's contribs to override default description OID 1.3.6.1.2.1.2.2.1.2 and for stp and cisco m[a|y]stery +my $o_descroid= undef; # description oid, overrides $descr_table +my $o_commentoid= undef; # comment text oid, kind-of like additional label text +my $o_ciscocat= undef; # enable special cisco catos hacks +my %o_cisco= (); # cisco options +my $o_stp= undef; # stp support option + +# Login and other options specific 
to SNMP +my $o_port = 161; # SNMP port +my $o_octetlength= undef; # SNMP Message size parameter (Makina Corpus contrib) +my $o_community = undef; # community +my $o_version2 = undef; # use snmp v2c +my $o_login= undef; # Login for snmpv3 +my $o_passwd= undef; # Pass for snmpv3 +my $v3protocols= undef; # V3 protocol list. +my $o_authproto= 'md5'; # Auth protocol +my $o_privproto= 'des'; # Priv protocol +my $o_privpass= undef; # priv password + +# Readable names for counters (M. Berger contrib) +my @countername = ( "in=" , "out=" , "errors-in=" , "errors-out=" , "discard-in=" , "discard-out=" ); +my $checkperf_out_desc; + +## Additional global variables +my %prev_perf_data=(); # array that is populated with previous performance data +my @prev_time= (); # timestamps if more then one set of previois performance data +my $perfcache_time=undef; # time when data was cached +my $perfcache_recache_trigger=43200; # How many seconds to use cached data for (default 12 hours for -m) +my $perfcache_recache_max=259200; # and 3 days for -mm (minmize most) +my $timenow=time(); # This used to be defined later but easier if moved to the top +my $stp_warntime=900; # Warn in case of change in STP state in last 15 minutes +my $check_speed=0; # If '-Y', '-u' or '-S' options are given this is set to 1 +my $specified_speed=0; # if -S has interface speed specified, this is set to speed specified there +my $speed_alert=undef; # if -S has alert specified, this is alert to issue if interface speed is not what is expected +my $shell_pid=undef; # defined only if run locally +my $snmp_session_v=0; # if no snmp session, its 0, otherwise 1 2 or 3 depending on version of SNMP session opened +my $num_int = 0; # number of interfaces that have matched +my @interfaces=(); # main array for interfaces data +# separated arrays that existed before that were replaced by above common array of structure/hash +# my @descr = (); --> $descr[$i] is now $interfaces[$i]{'descr'} +# my @portspeed=(); --> 
$portspeed[$i] is now $interfaces[$i]{'portspeed'}
my $perf_out='';	# performance data accumulator
my $saved_out='';	# saved data accumulator (added to perf)

# Functions

# read_file($traffic_file, $items_number)
# Reads previously cached traffic/performance data from a colon-separated
# temporary file.
# Input  : file name, number of items expected per line
# Returns: (error_flag, number_of_rows, @values) where @values is [line][item];
#          (1,0,0) when the file cannot be opened or contains no complete rows.
sub read_file {
	my ($traffic_file,$items_number)=@_;
	my ($ligne,$n_rows)=(undef,0);
	my (@last_values,@file_values,$i);
	open(FILE,"<".$traffic_file) || return (1,0,0);

	# NOTE(review): the readline target was lost in transit ("while($ligne = )");
	# restored to read from the FILE handle opened just above.
	while($ligne = <FILE>)
	{
		chomp($ligne);
		@file_values = split(":",$ligne);
		#verb("@file_values");
		if ($#file_values >= ($items_number-1)) {
			# check if there is enough data, else ignore line
			for ( $i=0 ; $i< $items_number ; $i++ ) {$last_values[$n_rows][$i]=$file_values[$i];}
			$n_rows++;
		}
	}
	close FILE;
	if ($n_rows != 0) {
		return (0,$n_rows,@last_values);
	} else {
		return (1,0,0);
	}
}

# write_file($file_out, $rows, $item, @file_values)
# Writes cached data back out as colon-separated lines, keeping at most
# $file_history most recent rows (declared elsewhere in this file).
# Input  : file name, row count, items per row, array of values [line][item]
# Returns: 0 on success, 1 on error
sub write_file {
	my ($file_out,$rows,$item,@file_values)=@_;
	my $start_line= ($rows > $file_history) ?
$rows - $file_history : 0; + if ( open(FILE2,">".$file_out) ) { + for (my $i=$start_line;$i<$rows;$i++) { + for (my $j=0;$j<$item;$j++) { + print FILE2 $file_values[$i][$j]; + if ($j != ($item -1)) { print FILE2 ":" }; + } + print FILE2 "\n"; + } + close FILE2; + return 0; + } else { + return 1; + } +} + +sub p_version { print "check_snmp_netint version : $Version\n"; } + +sub print_usage { + print "Usage: $0 [-v [debugfilename]] -H (-C [-2]) | (-l login -x passwd [-X pass -L ,) [-p ] [-N ] -n [-O ] [-I] [-i | -a | -D | -K] [-r] [-f[eSyYZ] [-P ] [-T ] [--pcount=]] [-k[qBMGu] [-S [intspeed]] -g [-w -c [-z]| -z] -d] [-o ] [-m|-mm] [-t ] [-s] [--label] [--cisco=[oper,][addoper,][linkfault,][use_portnames|show_portnames]] [--stp[=]] [-V]\n"; +} + +sub isnnum { # Return true if arg is not a number + my $num = shift; + if ( $num =~ /^(\d+\.?\d*)|(^\.\d+)$/ ) { return 0 ;} + return 1; +} + +sub ascii_to_hex { # Convert each ASCII character to a two-digit hex number [WL] + (my $str = shift) =~ s/(.|\n)/sprintf("%02lx", ord $1)/eg; + return $str; +} + +sub help { + print "\nNetwork Interfaces Monitor Plugin for Nagios (check_netint/check_snmp_netint) v. ",$Version,"\n"; + print "GPL 2.0 or 3.0 licence, (c)2004-2007 Patrick Proy, (c)2007-2012 William Leibzon\n"; + print "Please see documentation for a full list of authors and contributors\n\n"; + print_usage(); + print < chars of the interface(s) + If the number is negative, then get the LAST characters. +-K, --admindown_ok + Indicate that administrative down status is OK for operational + interfaces that are down + +Note : when multiple interfaces are selected with regexp, + all must be up (or down with -i) to get an OK result. + +Threshold Checks and Performance Data options: + +-f, --perfparse + Perfparse compatible output (no output when interface is down). 
+-e, --error + Add error & discard to Perfparse output +-S, --intspeed[=[OK|WARNING|CRITICAL<>]1000000Kb|100000000Kb|100000000Kb|10Mb|100Mb|1000Mb] + If -S is used by itself, this adds speed in performance output in bits/s. + This option also allows to specify speed of the interface if it can not be found by plugin + or if what plugin determined is wrong. Be carefull, if you're checking multiple interfaces, + this will override and set speed for all of them. Additionally if you prefix speed with + WARNING<> or CRITICAL<> the alert will be issued if speed is not what is expected. + IMPORTANT: Prior to version 2.4 of this plugin if you specified speed after --intspeed= + then instead of overriding it would issue a critical alert if speed is not + the same (CRITICAL<> behavior). You must specify alert type before speed now. +-y, --perfprct ; -Y, --perfspeed ; -Z, --perfoctet + -y : output performance data in % of max speed + -Y : output performance data in bits/s or Bytes/s (depending on -B) + -Z : output performance data in octets i.e. bytes (always so with -P) +-k, --perfcheck ; -q, --extperfcheck + -k check the input/ouput bandwidth of the interface + -q also check the error and discard input/output +--label + Add label before speed in output : in=, out=, errors-out=, etc... +-B, --kbits + Make the warning and critical levels in K|M|G Bits/s instead of K|M|G Bytes/s +-G, --giga ; -M, --mega ; -u, --prct + -G : Make the warning and critical levels in Gbps (with -B) or GBps + -M : Make the warning and critical levels in Mbps (with -B) or MBps + -u : Make the warning and critical levels in % of reported interface speed. 
+-w, --warning=input,output[,error in,error out,discard in,discard out] + warning level for input / output bandwidth (0 for no warning) + unit depends on B,M,G,u options + warning for error & discard input / output in error/min (need -q) +-c, --critical=input,output[,error in,error out,discard in,discard out] + critical level for input / output bandwidth (0 for no critical) + unit depends on B,M,G,u options + critical for error & discard input / output in error/min (need -q) +-z, --zerothresholds + if warning and/or critical thresholds are not specified, assume they are 0 + i.e. do not check thresholds, but still give input/ouput bandwidth for graphing + This option also prevents tmp files of being writtena. + +Options for saving results of previous checks to calculate Traffic & Utilization: + +-P, --prev_perfdata + Previous performance data (normally put '-P \$SERVICEPERFDATA\$' in nagios + command definition). This is used in place of temporary file that otherwise + could be needed when you want to calculate utilization of the interface + Also used to cache data about which OIDs to lookup instead of having + to check interface names table each time. +-T, --prev_checktime + This is used with -P and is a previous time plugin data was obtained, + use it as '-T \$LASTSERVICECHECK\$'. This is now optional as plugin + will also save time as part of performance variables and get it with -P +--pcount=INTEGER + Default: 2 + How many sets of previus data to keep as performance data. By keeping + at least couple sets allows for more realistic and less 'bursty' results + but nagios has buffer limits so very large output of performance data + would not be kept. +-d, --delta=seconds + Default: 300 seconds = 5 minutes + Expected time between checks in seconds. Used for making sure traffic + can be calculated properly. If plugin receives is run more often than + 0.75 of specified value, it'll not use results but keep previous data + for later check. 
If it is run and receives results more than 4 times + later then this value, it'll discard all previous performance data + and start calculation again next time it is run. +-F, --filestore[=|] + When you use -P option that causes plugin to use previous performance data + that is passed as an argument to plugin to calculate in/out bandwidth + instead of storing data in temporary file. This option is an alternative + to -P and causes results of previous check to be saved in a file. + If this option has a parameter and it is a directory, then temporary + file will be saved in this directory, otherwise /tmp is used. + If parameter is a filename then it is used as a first part in + how temporary file is named. +--nagios_with_saveddata + Enables experimental support for future Nagios SAVEDATA (output after ||) + where cached data for next plugin use goes to special buffer and not PERFDATA + [THIS IS AN EXPERIMENTAL OPTION THAT MAY BE REMOVED OR RENAMED IN THE FUTURE] + +SNMP Authentication options and options valid only with SNMP: + +-H, --hostname=HOST + name or IP address of host to check +-C, --community=COMMUNITY NAME + community name for the SNMP agent (used with v1 or v2c protocols) +-2, --v2c + use snmp v2c (can not be used with -l, -x) +-l, --login=LOGIN ; -x, --passwd=PASSWD + Login and auth password for snmpv3 authentication + If no priv password exists, implies AuthNoPriv +-X, --privpass=PASSWD + Priv password for snmpv3 (AuthPriv protocol) +-L, --protocols=, + : Authentication protocol (md5|sha : default md5) + : Priv protocols (des|aes : default des) +-p, --port=PORT + SNMP port (Default 161) +-o, --octetlength=INTEGER + Max-size of the SNMP message. Usefull in case of too long responses. + Be carefull with network filters. Range 484 - 65535, default are + usually 1472,1452,1460 or 1440. 
+-N, --descrname_oid=OID + SNMP OID of the description table (optional for non-standard equipment) +-O, --optionaltext_oid=OID + SNMP OID for additional optional commentary text name for each interface + This is added into output as interface "label" (but it is not matched on). +-g, --64bits + Use 64 bits counters instead of the standard counters when checking + bandwidth & performance data for interface >= 1Gbps. + You must use snmp v2c or v3 to get 64 bits counters. +-m, --minimize_queries | -mm, --minimum_queries + Optimization options to minimize number of SNMP queries. + This is done by saving table ids in performance data (see -P above) and doing + all SNMP checks together. When "-mm" or "--minimum_queries" option is used + the number of queries is even smaller but there are no checks done to make + sure ifindex description is still the same (not safe only if you add vlans) +--bulk_snmp_queries + Enables using GET_BULK_REQUEST (SNMP v2 and v3 only) to get data + While this may work and be faster on some systems, it fails on others +--cisco=[oper,][addoper,][linkfault,][use_portnames|show_portnames] + This enables special cisco snmp additions which: + 1) Provide extra detail on operational and fault status for physical ports. + There are 3 tables that are available: 'operStatus','AdditionalOperStatus', + 'LinkFaultStatus' (some switches have one, some may have all 3), if you do + not specify an attempt will be made for all tables but if caching is used + what is found available will be cached for future requests. + 2) Optional "use_portnames" argument means that instead of using normal SNMP + description OID table (or the one you could supply with -N) it would match + names with port description names that you set with with 'set port name', + this does however restrict to only cisco module ports (ifindex maybe larger + and include also non-port interfaces such as vlan). 
+ 3) Optional "show_portname" causes port names to go as comments (do not use with -O) +--stp[=disabled|blocking|listening|learning|forwarding|broken] + This enables reporting of STP (Spanning Tree Protocol) switch ports states. + If STP port state changes then plugin for period of time (default 15 minutes) + reports WARNING. Optional parameter after --stp= is expected STP state of + the port and plugin will return CRITICAL error if its anything else. +EOT +} + +# For verbose output (updated 06/06/12 to write to debug file if specified) +sub verb { + my $t=shift; + if (defined($o_verb)) { + if ($o_verb eq "") { + print $t, "\n"; + } + else { + if (!open (DEBUGFILE, ">>$o_verb")) { + print $t, "\n"; + } + else { + print DEBUGFILE $t,"\n"; + close DEBUGFILE; + } + } + } +} + +# Load previous performance data +# 05/20/12: modified to use quotewords as suggested by Nicholas Scott +sub process_perf { + my %pdh; + my ($nm,$dt); + use Text::ParseWords; + foreach (quotewords('\s+',1,$_[0])) { + if (/(.*)=(.*)/) { + ($nm,$dt)=($1,$2); + verb("prev_perf: $nm = $dt"); + # in some of my plugins time_ is to profile how long execution takes for some part of plugin + # $pdh{$nm}=$dt if $nm !~ /^time_/; + $pdh{$nm}=$dt; + $pdh{$nm}=$1 if $dt =~ /(\d+)c/; # 'c' is added as designation for octet + push @prev_time,$1 if $nm =~ /.*\.(\d+)/ && (!defined($prev_time[0]) || $prev_time[0] ne $1); # more then one set of previously cached performance data + } + } + return %pdh; +} + +# this is normal way check_snmp_int does it +# (function written by WL but based on previous code) +sub perf_name { + my ($iname,$vtype) = @_; + $iname =~ s/'\/\(\)/_/g; #' get rid of special characters in performance description name + return "'".$iname."_".$vtype."'"; +} + +# alternative function used by WL +sub perf_name2 { + my ($iname,$vtype) = @_; + $iname =~ s/'\/\(\)/_/g; #' + $iname =~ s/\s/_/g; + return $iname."_".$vtype; +} + +# This is used to lookup previous perf data. 
# prev_perf($name [, $vtype])
# Looks up a value in previously collected performance data (%prev_perf_data).
# If only one argument is given, it is used as is.
# Otherwise perf_name is called with these two arguments to get the name
# so function does something similar to $prev_perf{perf_name($iname,$vtype)}
# Returns the cached value, or undef if not present.
sub prev_perf {
	my ($name, $vtype) = @_;

	$name = perf_name($name,$vtype) if defined($vtype);
	return $prev_perf_data{$name} if exists($prev_perf_data{$name});
	# also lets check in case nagios stripped quotes out of perf
	# NOTE(review): the closing quote of this regex was garbled to a backtick
	# (/^'(.*)`$/) which could never match perf_name()'s 'name' quoting;
	# restored to match a name wrapped in single quotes.
	if ($name =~ /^'(.*)'$/) {
		return $prev_perf_data{$1} if exists($prev_perf_data{$1});
	}
	return undef;
}

# check_options()
# Parses and validates all command-line options, filling the file-scope
# $o_* / @o_* variables. Exits with UNKNOWN on any usage error.
sub check_options {
	Getopt::Long::Configure ("bundling");
	GetOptions(
		'v:s'	=> \$o_verb,		'verbose:s'	=> \$o_verb, "debug:s" => \$o_verb,
		'h'	=> \$o_help,		'help'		=> \$o_help,
		'H:s'	=> \$o_host,		'hostname:s'	=> \$o_host,
		'p:i'	=> \$o_port,		'port:i'	=> \$o_port,
		'n:s'	=> \$o_descr,		'name:s'	=> \$o_descr,
		'C:s'	=> \$o_community,	'community:s'	=> \$o_community,
		'2'	=> \$o_version2,	'v2c'		=> \$o_version2,
		'l:s'	=> \$o_login,		'login:s'	=> \$o_login,
		'x:s'	=> \$o_passwd,		'passwd:s'	=> \$o_passwd,
		'X:s'	=> \$o_privpass,	'privpass:s'	=> \$o_privpass,
		'L:s'	=> \$v3protocols,	'protocols:s'	=> \$v3protocols,
		't:i'	=> \$o_timeout,		'timeout:i'	=> \$o_timeout,
		'i'	=> \$o_inverse,		'inverse'	=> \$o_inverse,
		'a'	=> \$o_admin,		'admin'		=> \$o_admin,
		'D'	=> \$o_dormant,		'dormant'	=> \$o_dormant,
		'I'	=> \$o_ignorestatus,	'ignorestatus'	=> \$o_ignorestatus,
		'K'	=> \$o_admindown_ok,	'admindown_ok'	=> \$o_admindown_ok,
		'r'	=> \$o_noreg,		'noregexp'	=> \$o_noreg,
		'V'	=> \$o_version,		'version'	=> \$o_version,
		'f'	=> \$o_perf,		'perfparse'	=> \$o_perf,
		'e'	=> \$o_perfe,		'error'		=> \$o_perfe,
		'k'	=> \$o_checkperf,	'perfcheck'	=> \$o_checkperf,
		'q'	=> \$o_ext_checkperf,	'extperfcheck'	=> \$o_ext_checkperf,
		'w:s'	=> \$o_warn_opt,	'warning:s'	=> \$o_warn_opt,
		'c:s'	=> \$o_crit_opt,	'critical:s'	=> \$o_crit_opt,
		'z'	=> \$o_zerothresholds,	'zerothresholds' => \$o_zerothresholds,
		'B'	=> \$o_kbits,		'kbits'
=> \$o_kbits, + 's:i' => \$o_short, 'short:i' => \$o_short, + 'g' => \$o_highperf, '64bits' => \$o_highperf, + 'S:s' => \$o_intspeed, 'intspeed:s' => \$o_intspeed, + 'y' => \$o_perfp, 'perfprct' => \$o_perfp, + 'Y' => \$o_perfr, 'perfspeed' => \$o_perfr, + 'Z' => \$o_perfo, 'perfoctet' => \$o_perfo, + 'M' => \$o_meg, 'mega' => \$o_meg, + 'G' => \$o_gig, 'giga' => \$o_gig, + 'u' => \$o_prct, 'prct' => \$o_prct, + 'o:i' => \$o_octetlength, 'octetlength:i' => \$o_octetlength, + 'label' => \$o_label, + 'd:i' => \$o_delta, 'delta:i' => \$o_delta, + 'N:s' => \$o_descroid, 'descrname_oid:s' => \$o_descroid, + 'O:s' => \$o_commentoid, 'optionaltext_oid:s' => \$o_commentoid, + 'P:s' => \$o_prevperf, 'prev_perfdata:s' => \$o_prevperf, + 'T:s' => \$o_prevtime, 'prev_checktime:s'=> \$o_prevtime, + 'pcount:i' => \$o_pcount, + 'm' => \@o_minsnmp, 'minimize_queries' => \$o_minsnmp, + 'minimum_queries' => \$o_maxminsnmp, 'bulk_snmp_queries' => \$o_bulksnmp, + 'F:s' => \$o_filestore, 'filestore:s' => \$o_filestore, + 'cisco:s' => \$o_ciscocat, 'stp:s' => \$o_stp, + 'nagios_with_saveddata' => \$o_nagios_saveddata + ); + if (defined ($o_help) ) { help(); exit $ERRORS{"UNKNOWN"}}; + if (defined($o_version)) { p_version(); exit $ERRORS{"UNKNOWN"}}; + + # check snmp information + # 06/25/12: this is now required only if plugin name starts with check_snmp or if host is specified) + if ($0 =~ /^check_snmp/ || defined($o_host)) { + if ($do_snmp==0) { print "Can't locate Net/SNMP.pm\n"; exit $ERRORS{"UNKNOWN"}} + if (defined($o_descroid)) { $descr_table = $o_descroid; } + if (!defined($o_descr) && !defined($o_community) && (!defined($o_login) || !defined($o_passwd)) ) + { print "Specify community and put snmp login info!\n"; print_usage(); exit $ERRORS{"UNKNOWN"}} + if ((defined($o_login) || defined($o_passwd)) && (defined($o_community) || defined($o_version2)) ) + { print "Can't mix snmp v1,2c,3 protocols!\n"; print_usage(); exit $ERRORS{"UNKNOWN"}} + if (defined ($v3protocols)) { + if 
(!defined($o_login)) { print "Put snmp V3 login info with protocols!\n"; print_usage(); exit $ERRORS{"UNKNOWN"}} + my @v3proto=split(/,/,$v3protocols); + if ((defined ($v3proto[0])) && ($v3proto[0] ne "")) {$o_authproto=$v3proto[0]; } # Auth protocol + if (defined ($v3proto[1])) {$o_privproto=$v3proto[1]; } # Priv protocol + if ((defined ($v3proto[1])) && (!defined($o_privpass))) + { print "Put snmp V3 priv login info with priv protocols!\n"; print_usage(); exit $ERRORS{"UNKNOWN"}} + } + if (defined($o_minsnmp[1])) { + $o_maxminsnmp=1; + } + elsif (defined($o_minsnmp[0])) { + $o_minsnmp=1; + } + if (defined($o_maxminsnmp)) { + if (defined($o_minsnmp)) { + print "You dont need to use -m when you already specified -mm."; print_usage(); exit $ERRORS{"UNKNOWN"}; + } + else { + $o_minsnmp=1; + # $o_bulksnmp=1; + } + } + # Check snmpv2c or v3 with 64 bit counters + if (defined ($o_highperf) && (!defined($o_version2) && defined($o_community))) + { print "Can't get 64 bit counters with snmp version 1\n"; print_usage(); exit $ERRORS{"UNKNOWN"}} + if (defined ($o_highperf)) { + if (eval "require bigint") { + use bigint; + } else { print "Need bigint module for 64 bit counters\n"; print_usage(); exit $ERRORS{"UNKNOWN"}} + } + $perfcache_recache_trigger=$perfcache_recache_max if defined($o_maxminsnmp); + if (defined($o_commentoid)) { + if ($o_commentoid !~ /\./) { + print "Comment OID is not specified or is not valid\n"; print_usage(); exit $ERRORS{"UNKNOWN"}; + } + $o_commentoid.='.' 
if $o_commentoid !~ /\.$/; + } + #### octet length checks + if (defined ($o_octetlength) && (isnnum($o_octetlength) || $o_octetlength > 65535 || $o_octetlength < 484 )) { + print "octet length must be < 65535 and > 484\n";print_usage(); exit $ERRORS{"UNKNOWN"}; + } + } + else { + $do_snmp=0; + if (defined($o_octetlength) || defined($o_highperf) || defined($o_maxminsnmp) || defined($o_minsnmp) || + defined($v3protocols) || defined($o_login) || defined($o_passwd) || defined($o_version2) || defined($o_community) || + defined($o_ciscocat) || defined($o_stp) || defined($o_commentoid) || defined($o_descroid)) { + print "Option you specified is only valid with SNMP. Maybe you forgot to specify hostname with -h?\n";print_usage(); exit $ERRORS{"UNKNOWN"}; + } + } + if (defined($o_timeout) && (isnnum($o_timeout) || ($o_timeout < 2) || ($o_timeout > 60))) + { print "Timeout must be >1 and <60 !\n"; print_usage(); exit $ERRORS{"UNKNOWN"}} + if (!defined($o_timeout)) {$o_timeout=5;} + + # check if -e without -f + if ( defined($o_perfe) && !defined($o_perf)) + { print "Cannot output error without -f option!\n"; print_usage(); exit $ERRORS{"UNKNOWN"}} + if (defined ($o_perfr) && defined($o_perfp) ) { + print "-Y and -y options are exclusives\n"; print_usage(); exit $ERRORS{"UNKNOWN"}} + if ((defined ($o_perfr) || defined($o_perfp) || defined($o_perfo)) && !defined($o_checkperf)) { + print "Cannot put -Y or -y or -Z options without perf check option (-k) \n"; print_usage(); exit $ERRORS{"UNKNOWN"}} + if (defined ($o_short)) { + #TODO maybe some basic tests ? 
characters return empty string + } + if (defined($o_prevperf)) { + if (defined($o_perf)) { + %prev_perf_data=process_perf($o_prevperf); + # put last time nagios was checked in timestamp array + if (defined($prev_perf_data{'ptime'})) { + push @prev_time, $prev_perf_data{'ptime'}; + } + elsif (defined($o_prevtime)) { + push @prev_time, $o_prevtime; + $prev_perf_data{'ptime'}=$o_prevtime; + } + else { + @prev_time=(); + } + # numeric sort for timestamp array (this is from lowest time to highiest, i.e. to latest) + my %ptimes=(); + $ptimes{$_}=$_ foreach @prev_time; + @prev_time = sort { $a <=> $b } keys(%ptimes); + } + else { + print "need -f option first \n"; print_usage(); exit $ERRORS{"UNKNOWN"}; + } + } + if (defined($o_prevtime) && !defined($o_prevperf)) + { + print "Specifying previous servicecheck is only necessary when you send previous performance data (-T)\n"; + print_usage(); exit $ERRORS{"UNKNOWN"}; + } + if (defined ($o_checkperf)) { + my @o_warn=(); + @o_warn=split(/,/,$o_warn_opt) if defined($o_warn_opt); + if (!defined($o_zerothresholds) && defined($o_ext_checkperf) && (!defined($o_warn_opt) || $#o_warn != 5)) { + print "Add '-z' or specify 6 warning levels for extended checks \n"; print_usage(); exit $ERRORS{"UNKNOWN"}; + } + if (!defined($o_zerothresholds)&& !defined($o_ext_checkperf) && (!defined($o_warn_opt) || $#o_warn !=1 )){ + print "Add 'z' or specify 2 warning levels for bandwidth checks \n"; print_usage(); exit $ERRORS{"UNKNOWN"}; + } + my @o_crit=(); + @o_crit=split(/,/,$o_crit_opt) if defined($o_crit_opt); + #verb(" $o_crit_opt :: $#o_crit : @o_crit"); + if (!defined($o_zerothresholds) && defined($o_ext_checkperf) && (!defined($o_crit_opt) || $#o_crit != 5)) { + print "Add '-z' or specify 6 critical levels for extended checks \n"; print_usage(); exit $ERRORS{"UNKNOWN"}; + } + if (!defined($o_zerothresholds) && !defined($o_ext_checkperf) && (!defined($o_crit_opt) || $#o_crit !=1 )) { + print "Add '-z' or specify 2 critical levels for 
bandwidth checks \n"; print_usage(); exit $ERRORS{"UNKNOWN"}; + } + for (my $i=0;$i<=$#o_warn;$i++) { + if ($o_warn[$i] =~ /^\d+$/) { + $o_warn_max[$i] = $o_warn[$i]; + } elsif ($o_warn[$i] =~ /^(\d+)?-(\d+)?$/) { + $o_warn_min[$i] = $1 if $1; + $o_warn_max[$i] = $2 if $2; + } else { + print "Can't parse warning level: $o_warn[$i]\n"; print_usage(); exit $ERRORS{"UNKNOWN"}; + } + } + for (my $i=0;$i<=$#o_crit;$i++) { + if ($o_crit[$i] =~ /^\d+$/) { + $o_crit_max[$i] = $o_crit[$i]; + } elsif ($o_crit[$i] =~ /^(\d+)?-(\d+)?$/) { + $o_crit_min[$i] = $1 if $1; + $o_crit_max[$i] = $2 if $2; + } else { + print "Can't parse critical level: $o_crit[$i]\n"; print_usage(); exit $ERRORS{"UNKNOWN"}; + } + } + for (my $i=0;$i<=$#o_warn;$i++) { + if (defined($o_crit_max[$i]) && defined($o_warn_max[$i]) && + $o_crit_max[$i] && $o_warn_max[$i] && $o_warn_max[$i] > $o_crit_max[$i]) { + print "Warning max must be < Critical max level \n"; print_usage(); exit $ERRORS{"UNKNOWN"}; + } + if (defined($o_crit_min[$i]) && defined($o_warn_min[$i]) && + $o_warn_min[$i] && $o_crit_min[$i] && $o_warn_min[$i] < $o_crit_min[$i]) { + print "Warning min must be > Critical min level \n"; print_usage(); exit $ERRORS{"UNKNOWN"}; + } + } + if ((defined ($o_meg) && defined($o_gig) ) || (defined ($o_meg) && defined($o_prct) )|| (defined ($o_gig) && defined($o_prct) )) { + print "-M -G and -u options are exclusives\n"; print_usage(); exit $ERRORS{"UNKNOWN"}; + } + } + # cisco hacks to use or show user-specified port names (WL) + if (defined($o_ciscocat)) { + if (!defined($o_host)) { + print "Cisco option is only valid with SNMP when checking remote host\n";print_usage(); exit $ERRORS{"UNKNOWN"}; + } + $o_cisco{$_}=$_ foreach (split ',',$o_ciscocat); + if (defined($o_cisco{use_portnames})) { + if (defined($o_descroid)) { + print "Can not use -N when --cisco=use_portnames option is used\n"; print_usage(); exit $ERRORS{'UNKNOWN'}; + } + else { + $descr_table = $cisco_port_name_table; + } + } + elsif 
(defined($o_cisco{show_portnames})) { + if (defined($o_commentoid)) { + print "Can not use -O when --cisco=show_portnames option is used\n"; print_usage(); exit $ERRORS{'UNKNOWN'}; + } + else { + $o_commentoid = $cisco_port_name_table; + } + } + $o_cisco{auto}='auto' if (!defined($o_cisco{oper}) && !defined($o_cisco{addoper}) && !defined($o_cisco{linkfault}) &&!defined($o_cisco{noauto})); + verb("Cisco Options: ".join(',',keys %o_cisco)); + } + # stp support + if (defined($o_stp) && $o_stp ne '') { + if (!defined($o_host)) { + print "STP option is currently only valid with SNMP when checking remote host\n";print_usage(); exit $ERRORS{"UNKNOWN"}; + } + $stp_portstate_reverse{$stp_portstate{$_}}=$_ foreach keys %stp_portstate; + if (!defined($stp_portstate_reverse{$o_stp})) { + print "Incorrect STP state specified after --stp=\n"; print_usage(); exit $ERRORS{'UNKNOWN'}; + } + } + # interface speed + $check_speed = 1 if defined($o_prct) || defined($o_perfp); + if (defined($o_intspeed) && $o_intspeed =~ /(\d+)/) { + $specified_speed = $1; + $specified_speed = $specified_speed*1024 if $o_intspeed =~ /Kb?$/; + $specified_speed = $specified_speed*1024*1024 if $o_intspeed =~ /Mb?$/; + if ($o_intspeed =~ /^(.*)<>/) { + $speed_alert = $1; + $check_speed = 1; + if (!exists($ERRORS{$speed_alert})) { + print "Incorrect alert type $speed_alert specified at --intspeed=\n"; print_usage(); exit $ERRORS{'UNKNOWN'}; + } + if ($specified_speed==0) { + print "Must specify speed after alert type with --intspeed\n"; print_usage(); exit $ERRORS{"UNKNOWN"}; + } + } + else { + $check_speed = 0; # since we specified speed here, we don't need to check it + } + } +} + +# new function from code that checks if interface name matches name given with -n option +sub int_name_match { + my $name = shift; + + return 1 if !defined($o_descr); + # test by regexp or exact match + return ($name eq $o_descr) if defined($o_noreg); + return ($name =~ /$o_descr/); +} + +# new function that cleans interface 
name as it may appear in SNMP into what we use
# Strips trailing non-ASCII/NUL bytes and all control characters so names
# from buggy SNMP agents compare equal to what the user passes with -n.
sub clean_int_name {
	my $name = shift;

	# below chop line is based on code by Y. Charton to remove ^@ (NULL ?) and other
	# non-ASCII characters at the end of the interface description, this allows to
	# correctly match interfaces for Windows servers, since they have buggy snmp
	chop($name) if (ord(substr($name,-1,1)) > 127 || ord(substr($name,-1,1)) == 0 );

	$name =~ s/[[:cntrl:]]//g;
	chomp $name;
	return $name;
}

# function that opens proper session for SNMP v1/2/3
# Chooses the protocol from the file-scope options: v3 when login+password are
# set (AuthPriv if a priv password is also given, AuthNoPriv otherwise),
# else v2c when -2 was given, else v1. Side effects: sets $snmp_session_v to
# the version opened; exits UNKNOWN if the session cannot be created.
# Returns the open Net::SNMP session object.
sub create_snmp_session {
	my ($session,$error);

	if ( defined($o_login) && defined($o_passwd)) {
		# SNMPv3 login
		if (!defined ($o_privpass)) {
			verb("SNMPv3 AuthNoPriv login : $o_login, $o_authproto");
			($session, $error) = Net::SNMP->session(
				-hostname	=> $o_host,
				-version	=> '3',
				-port		=> $o_port,
				-username	=> $o_login,
				-authpassword	=> $o_passwd,
				-authprotocol	=> $o_authproto,
				-timeout	=> $o_timeout
			);
		} else {
			verb("SNMPv3 AuthPriv login : $o_login, $o_authproto, $o_privproto");
			($session, $error) = Net::SNMP->session(
				-hostname	=> $o_host,
				-version	=> '3',
				-username	=> $o_login,
				-port		=> $o_port,
				-authpassword	=> $o_passwd,
				-authprotocol	=> $o_authproto,
				-privpassword	=> $o_privpass,
				-privprotocol	=> $o_privproto,
				-timeout	=> $o_timeout
			);
		}
		$snmp_session_v = 3;
	} else {
		if (defined ($o_version2)) {
			# SNMPv2c Login
			verb("SNMP v2c login");
			($session, $error) = Net::SNMP->session(
				-hostname	=> $o_host,
				-version	=> 2,
				-community	=> $o_community,
				-port		=> $o_port,
				-timeout	=> $o_timeout
			);
			$snmp_session_v = 2;
		} else {
			# SNMPV1 login
			verb("SNMP v1 login");
			($session, $error) = Net::SNMP->session(
				-hostname	=> $o_host,
				-community	=> $o_community,
				-port		=> $o_port,
				-timeout	=> $o_timeout
			);
			$snmp_session_v = 1;
		}
	}
	if (!defined($session)) {
		printf("ERROR opening session: %s.\n", $error);
		exit $ERRORS{"UNKNOWN"};
	}
	if
(defined($o_octetlength) || defined($o_bulksnmp)) { + my $oct_resultat=undef; + my $oct_test=$session->max_msg_size(); + verb(" actual max octets:: $oct_test"); + if (defined($o_octetlength)) { + $oct_resultat = $session->max_msg_size($o_octetlength); + } + else { # for bulksnmp we set message size to 5 times its default + $oct_resultat = $session->max_msg_size($oct_test * 5); + } + if (!defined($oct_resultat)) { + printf("ERROR: Session settings : %s.\n", $session->error); + $session->close; + exit $ERRORS{"UNKNOWN"}; + } + $oct_test= $session->max_msg_size(); + verb(" new max octets:: $oct_test"); + } + return $session; +} + +# function that does snmp get request for a list of OIDs +# 1st argument is session, 2nd is ref to list of OIDs, +# 3rd is optional text for error & debug info +# 4th argument is optional hash of array to be filled with results +sub snmp_get_request { + my ($session, $oids_ref, $table_name, $results) = @_; + my $result = undef; + + verb("Doing snmp request on ".$table_name." OIDs: ".join(' ',@{$oids_ref})); + if (defined($o_bulksnmp) && $snmp_session_v > 1) { + my @oids_bulk=(); + my ($oid_part1,$oid_part2); + foreach(@{$oids_ref}) { + if (/^(.*)\.(\d+)$/) { + $oid_part1=$1; + $oid_part2=$2; + $oid_part2-- if $oid_part2 ne '0'; + $oid_part1.='.'.$oid_part2; + push @oids_bulk,$oid_part1; + } + } + verb("Converted to bulk request on OIDs: ".join(' ',@oids_bulk)); + $result = $session->get_bulk_request( + -nonrepeaters => scalar(@oids_bulk), + -maxrepetitions => 0, + -varbindlist => \@oids_bulk, + ); + } + else { + $result = $session->get_request( + -varbindlist => $oids_ref + ); + } + if (!defined($result)) { + printf("SNMP ERROR getting %s : %s.\n", $table_name, $session->error); + $session->close; + exit $ERRORS{"UNKNOWN"}; + } + + verb("Finished SNMP request. Result contains ".scalar(keys %$result)." entries:"); + foreach(keys %$result) { + $results->{$_} = $result->{$_} if defined($results); + verb(" ".$_." 
= ".$result->{$_}); + } + + return $result; +} + +# executes shell command, returns results blob file pointer +sub exec_shell_command { + my $shell_command = shift; + + verb("Executing: $shell_command"); + $shell_pid=open(SHELL_DATA, "$shell_command 2>&1 |"); + if (!$shell_pid) { + print "UNKNOWN ERROR - could not execute $shell_command - $!"; + exit $ERRORS{'UNKNOWN'}; + } + return \*SHELL_DATA; +} + +# closes blob/file pointer, end of shell command execution +sub finish_shell_command { + my $file_ref = shift; + close $file_ref; + $shell_pid = undef; +} + +# this function gets all interface data on localhost +# by executing ifconfig and other commands as necessary +# for the machine and architecture its being run on +sub getdata_localhost { + my $linux_ifconfig = "/sbin/ifconfig"; + my $linux_ethtool = "/sbin/ethtool"; + my $linux_iwconfig = "/sbin/iwconfig"; + + # first find architecture we're running on + my $shell_command; + my $shell_ref = undef; + my $os = `uname`; + chomp $os; + + # Linux output of "ifconfig": + # + # wlan0 Link encap:Ethernet HWaddr a0:88:b4:97:9b:d4 + # inet addr:192.168.1.31 Bcast:192.168.1.255 Mask:255.255.255.0 + # inet6 addr: fe80::a288:b4ff:fe97:9bd4/64 Scope:Link + # UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1 + # RX packets:3610846 errors:0 dropped:0 overruns:0 frame:0 + # TX packets:3054674 errors:0 dropped:0 overruns:0 carrier:0 + # collisions:0 txqueuelen:1000 + # RX bytes:2637787245 (2.6 GB) TX bytes:604337476 (604.3 MB) + # + # Linux output of "netstat -n -i": + # + # Kernel Interface table + # Iface MTU Met RX-OK RX-ERR RX-DRP RX-OVR TX-OK TX-ERR TX-DRP TX-OVR Flg + # eth0 1500 0 0 0 0 0 0 0 0 0 BMU + # lo 16436 0 221065 0 0 0 221065 0 0 0 LRU + # wlan0 1500 0 3657422 0 0 0 3100992 0 0 0 BMRU + # + # (these are packets, so should multiply by MTU but size may actually different) + # + # Linux output of "ip link show" : + # + # 1: lo: mtu 16436 qdisc noqueue state UNKNOWN + # link/loopback 00:00:00:00:00:00 brd 
00:00:00:00:00:00 + # 2: eth0: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 + # link/ether f0:de:f1:76:83:df brd ff:ff:ff:ff:ff:ff + # 3: wlan0: mtu 1500 qdisc mq state UP qlen 1000 + # link/ether a0:88:b4:97:9b:d4 brd ff:ff:ff:ff:ff:ff + # 4: virbr0: mtu 1500 qdisc noqueue state DOWN + # link/ether 86:cd:48:f1:05:ad brd ff:ff:ff:ff:ff:ff + + # FREEBSD output of "ifconfig" : + # + # plip0: flags=8810 metric 0 mtu 1500 + # lo0: flags=8049 metric 0 mtu 16384 + # options=3 + # inet6 fe80::1%lo0 prefixlen 64 scopeid 0x4 + # inet6 ::1 prefixlen 128 + # inet 127.0.0.1 netmask 0xff000000 + # nd6 options=3 + # + # FREEBSD output of netstat: + # + # vpn8# netstat -w 1 + # input (Total) output + # packets errs bytes packets errs bytes colls + # 145159 0 82485349 153321 0 117743533 0 + # 136895 0 79084654 145018 0 111877348 0 + # 146279 0 81710082 153952 0 116857455 0 + # 143634 0 80602950 152005 0 116127801 0 + # 144205 0 81175701 151892 0 115836499 0 + # 146342 0 83031415 153968 0 118093209 0 + # 143365 0 80564430 151508 0 115702643 0 + # 145253 0 82320881 153062 0 116675147 0 + # 146909 0 81998714 155485 0 117155601 0 + # 91507 0 76590439 79098 0 35481375 0 + # 145148 0 81429871 153728 0 116617071 0 + # + # Note: I think it should have been "netstat -bni -I " ... 
+ # + # Script for bandwidth from http://www.unix.com/unix-dummies-questions-answers/9528-commnad-getting-bandwidth-usage.html: + # open(NETSTAT, "/usr/bin/netstat -I vr0 1|") || die "$!"; + # + # for ($lineCount = 1; $lineCount <= 3; $lineCount++) { + # $inputLine = ; + # chomp($inputLine); + # if ($lineCount == 3) { + # $inputLine =~ s/\s+/-/g; + # my @column = split(/-/, $inputLine); + # $throughPut = ($column[6]*8)/1024; + # print "$throughPut\n"; + # }; + #}; + # #close(NETSTAT); + + # AIX ('entstat -d ent0' equivalent to ethtool + # San Solaris (link_speed will return 0 for 10mbit and 1 for 100mbit + # link_mode will return 0 for half duplex and 1 for full duplex) + # Example: if you want the speed for /dev/hme2, you issue: + # /usr/sbin/ndd -set /dev/hme instance 2 + # /usr/sbin/ndd /dev/hme link_speed + # Also see: https://cfengine.com/forum/read.php?3,27223 + + if ($os ne "Linux") { + printf("Only Linux is currently supported for local interfaces\n"); + exit $ERRORS{"UNKNOWN"}; + } + # This is for linux, may want to move to separate function later + else { + $shell_ref = exec_shell_command("$linux_ifconfig -a"); + + $num_int=0; + my $int_lines=0; + while (<$shell_ref>) { + chomp; + verb("got line: $_"); + my @words = split; + if (!$int_lines && scalar(@words)>2 && $words[1] eq 'Link') { + if (int_name_match($words[0])) { + $interfaces[$num_int] = { + 'descr' => $words[0], 'admin_up'=> $status{'DOWN'}, 'oper_up'=> $status{'DOWN'}, # considered common between SNMP and local checks + 'in_bytes' => 0, 'out_bytes' => 0, 'in_packets' => 0, 'out_packets' => 0, # considered common, but packets are not used + 'in_errors' => 0, 'out_errors' => 0, # considered common + 'in_dropped' => 0, 'out_dropped' => 0, # common, same as discards for SNMP + 'in_overruns' => 0, 'out_overruns' => 0, # added to errors if not 0 + 'collisions' => 0, 'txqueuelen' => 0, 'metric' => 0, 'MTU'=>0 # linux-specific names, not really used + }; + $int_lines=1; + } + } + elsif ($int_lines && 
scalar(@words)<2) { + $int_lines=0; + $num_int++; + } + elsif ($int_lines) { + my $prefix=""; + foreach(@words) { + if ($_ eq "RX") { $prefix = "in_"; } + elsif ($_ eq "TX") { $prefix = "out_"; } + elsif ($_ eq "UP") { $interfaces[$num_int]{'admin_up'} = $status{'UP'}; } + elsif ($_ eq "RUNNING") { $interfaces[$num_int]{'oper_up'} = $status{'UP'}; } + elsif ($_ =~ /^(.*):(\d+)/) { + verb(" interface #".$num_int." (".$interfaces[$num_int]{'descr'}.") : ".$prefix.$1." = ".$2); + $interfaces[$num_int]{$prefix.$1} = $2; + $interfaces[$num_int]{$prefix.'errors'} += $2 if ($1 eq 'overruns'); + } + } + } + } + finish_shell_command($shell_ref); + if ($check_speed) { + if (-f $linux_iwconfig) { + my %wint = (); + my $cint = undef; + $shell_ref = exec_shell_command($linux_iwconfig); + while (<$shell_ref>) { + chomp; + verb("got line: $_"); + if ($_ eq "") { + verb(" end interface $cint") if defined($cint); + $cint = undef if defined($cint); + } + elsif (/^(\w+)\s+IEEE/) { + verb(" start interface $1"); + $cint = $1; + } + elsif (/\s+Bit\sRate\=(\d+\.?\d?)\s+Mb/) { + verb(" speed of ".$cint." is ".$1." Mb/s") if defined($cint); + $wint{$cint} = $1 if defined($cint); + } + } + finish_shell_command($shell_ref); + if (scalar(keys %wint)>0) { + for (my $i=0;$i<$num_int;$i++) { + if (exists($wint{$interfaces[$i]{'descr'}})) { + $interfaces[$i]{'portspeed'} = $wint{$interfaces[$i]{'descr'}} *1024*1024; + verb(" speed of interface ".$interfaces[$i]{'descr'}." is ".$interfaces[$i]{'portspeed'}." bps"); + } + } + } + } + if (-f $linux_ethtool) { + for (my $i=0;$i < $num_int; $i++) { + if ($interfaces[$i]{'admin_up'} == $status{'UP'} && $interfaces[$i]{'oper_up'} == $status{'UP'}) { + $shell_ref = exec_shell_command("$linux_ethtool ".$interfaces[$i]{'descr'}); + while(<$shell_ref>) { + if (/Speed:\s(\d+)Mb/) { + $interfaces[$i]{'portspeed'} = $1 *1024*1024; + verb(" speed of interface ".$interfaces[$i]{'descr'}." is ".$interfaces[$i]{'portspeed'}." 
bps"); + } + } + finish_shell_command($shell_ref); + } + } + } + } + } +} + +# code that retrieves data by SNMP and populates interfaces array is now in this function +# instead of directly directly part of non-function main code below +sub getdata_snmp { + # global arrays of interface data used used for snmp retrieval + my $session = undef; + my @tindex = (); + my @oids = undef; + my @oids_admin = undef; + my @oid_descr=(); # this is actually only used with '-m' to double-check that cached index is correct + my @oid_speed=(); + my @oid_speed_high=(); + my @oid_commentlabel=(); + my @oid_ciscostatus=(); + my @oid_ciscofaultstatus=(); + my @oid_ciscooperstatus=(); + my @oid_ciscoaddoperstatus=(); + my @oid_stpstate=(); + my %cisco_timap=(); + my %stp_ifmap=(); + my @stpport=(); + my @cport=(); + my @portspeed=(); + my @descr=(); + my %copt=(); + my %copt_next=(); + my $results = {}; + my (@oid_perf,@oid_perf_outoct,@oid_perf_inoct,@oid_perf_inerr,@oid_perf_outerr,@oid_perf_indisc,@oid_perf_outdisc)= (undef,undef,undef,undef,undef,undef,undef); + my ($result1,$result2,$data1) = (undef,undef,undef); + my $int_status_extratext=""; + + # Create SNMP session + $session = create_snmp_session(); + + if (defined($o_minsnmp) && %prev_perf_data) { + # load old-style arrays + @tindex = split(',', prev_perf('cache_descr_ids')) if defined(prev_perf('cache_descr_ids')); + @cport = split(',', prev_perf('cache_descr_cport')) if defined(prev_perf('cache_descr_cport')); + @stpport = split(',', prev_perf('cache_descr_stpport')) if defined(prev_perf('cache_descr_stpport')); + @portspeed = split(',', prev_perf('cache_int_speed')) if defined(prev_perf('cache_int_speed')) && $specified_speed==0; + @descr = split(',', prev_perf('cache_descr_names')) if defined(prev_perf('cache_descr_names')); + + # clear old index if anything seems wrong with cached data + my %tindex_hash = map { $_ => 1 } @tindex; + @tindex = () if (scalar(@tindex) != scalar(keys %tindex_hash)) || # make sure no 
duplicates + (scalar(@tindex) != scalar(@descr)) || + (defined($o_ciscocat) && (!defined(prev_perf('cache_descr_cport')) || scalar(@tindex) != scalar(@cport))) || + (defined($o_stp) && (!defined(prev_perf('cache_descr_stpport')) || scalar(@tindex) != scalar(@stpport))) || + (defined(prev_perf('cache_int_speed')) && scalar(@tindex) != scalar(@portspeed)) || + # this checks that time of last saved indeces is not way too long ago, in which case we check them again + (!defined($perfcache_time) || $timenow < $perfcache_time || ($timenow - $perfcache_time) > $perfcache_recache_trigger); + + # load into our new array + for (my $i=0;$i $descr[$i]}; + $interfaces[$i]{'speed'} = $portspeed[$i] if defined(prev_perf('cache_int_speed')); + } + if (defined(prev_perf('cache_cisco_opt'))) { + $copt{$_}=$_ foreach(split ',',prev_perf('cache_cisco_opt')); + } + + if (scalar(@tindex)>0) { + $num_int = scalar(@tindex); + verb("Using cached data:"); + verb(" tindex=".join(',',@tindex)); + verb(" descr=".join(',',@descr)); + verb(" speed=".join(',',@portspeed)) if scalar(@portspeed)>0; + verb(" copt=".join(',',keys %copt)) if scalar(keys %copt)>0; + if (scalar(@cport)>0) { + verb(" cport=".join(',',@cport)); + @cport=() if $cport[0]==-1; # perf data with previous check done with --cisco but no cisco data was found + } + if (scalar(@stpport)>0) { + verb(" stpport=".join(',',@stpport)); + @stpport=() if $stpport[0]==-1; # perf data with previous check done with --stp but no stp data was found + } + } + } + + if (scalar(@tindex)==0) { + # WL: Get cisco port->ifindex map table + if (defined($o_ciscocat)) { + $result2 = $session->get_table( + -baseoid => $cisco_port_ifindex_map + ); + if (!defined($result2)) { + printf("ERROR: Cisco port-index map table : %s.\n", $session->error); + $session->close; + exit $ERRORS{"UNKNOWN"}; + } + foreach (keys %$result2) { + $cisco_timap{$result2->{$_}}=$1 if /$cisco_port_ifindex_map\.(.*)/; + } + } + # WL: Get stp port->ifindex map table + if 
(defined($o_stp)) { + $result1 = $session->get_table( + -baseoid => $stp_dot1dbase_ifindex_map + ); + if (!defined($result1)) { + printf("ERROR: STP port-index map table : %s.\n", $session->error); + $session->close; + exit $ERRORS{"UNKNOWN"}; + } + foreach (keys %$result1) { + $stp_ifmap{$result1->{$_}}=$1 if /$stp_dot1dbase_ifindex_map\.(.*)/; + } + } + $perfcache_time = $timenow; + verb("Getting Interfaces Description Table ($descr_table):"); + # Get description table + $result1 = $session->get_table( + -baseoid => $descr_table + ); + if (!defined($result1)) { + printf("ERROR: Description table : %s.\n", $session->error); + $session->close; + exit $ERRORS{"UNKNOWN"}; + } + # Select interface by regexp of exact match and put the oid to query in an array + foreach my $key (keys %$result1) { + $data1 = clean_int_name($result1->{$key}); + verb(" OID : $key, Clean Desc : $data1, Raw Desc: ".$result1->{$key}); + + if (int_name_match($data1) && $key =~ /$descr_table\.(.*)/) { + $interfaces[$num_int] = { 'descr' => '', 'admin_up' => 0, 'oper_up' => 0, 'in_bytes' => 0, 'out_bytes' => 0, + 'in_packets' => 0, 'out_packets' => 0, 'in_errors' => 0, 'out_errors' => 0 }; + # WL: get the index number of the interface (using additional map in case of cisco) + if (defined($o_ciscocat)) { + if (defined($o_cisco{'use_portnames'}) && defined($result2->{$cisco_port_ifindex_map.'.'.$1})) { + $cport[$num_int] = $1; + $tindex[$num_int] = $result2->{$cisco_port_ifindex_map.'.'.$1}; + } + elsif (defined($cisco_timap{$1})) { + $cport[$num_int] = $cisco_timap{$1}; + $tindex[$num_int] = $1; + } + else { + $tindex[$num_int] = $1; + } + } + else { + $tindex[$num_int] = $1; + } + # WL: find which STP port to retrieve data for that corresponds to this ifindex port + if (defined($o_stp)) { + $stpport[$num_int] = $stp_ifmap{$tindex[$num_int]} if exists($stp_ifmap{$tindex[$num_int]}); + } + # get the full description and get rid of special characters (specially for Windows) + 
$interfaces[$num_int]{'descr'}=$data1; + $num_int++; + } + } + } + + if ($num_int == 0) { + $session->close; + } + else { + # Change to 64 bit counters if option is set : + if (defined($o_highperf)) { + $out_octet_table=$out_octet_table_64; + $in_octet_table=$in_octet_table_64; + } + + # WL: Prepare list of all OIDs to be retrieved for interfaces we want to check + for (my $i=0;$i<$num_int;$i++) { + verb("Name : $interfaces[$i]{'descr'}, Index : $tindex[$i]"); + + # put the admin or oper oid in an array + $oids[$i]= defined ($o_admin) ? $admin_table . $tindex[$i] : $oper_table . $tindex[$i] ; + $oids_admin[$i]= $admin_table . $tindex[$i]; + # this is for verifying cached description index is correct + # (just in case ifindex port map or cisco port name changes) + if (defined($o_minsnmp) && !defined($o_maxminsnmp)) { + if (defined($o_cisco{'use_portnames'})) { + $oid_descr[$i] = $descr_table .'.'.$cport[$i]; + } + else { + $oid_descr[$i] = $descr_table .'.'.$tindex[$i]; + } + } + if (defined($o_ciscocat) && $cport[$i]) { + if (exists($o_cisco{'oper'}) || exists($copt{'oper'}) || + (scalar(keys %copt)==0 && exists($o_cisco{'auto'}))) { + $oid_ciscooperstatus[$i] = $cisco_port_operstatus_table . $cport[$i]; + } + if (exists($o_cisco{'addoper'}) || exists($copt{'addoper'}) || + (scalar(keys %copt)==0 && exists($o_cisco{'auto'}))) { + $oid_ciscoaddoperstatus[$i] = $cisco_port_addoperstatus_table . $cport[$i]; + } + if (exists($o_cisco{'linkfault'}) || exists($copt{'linkfault'}) || + (scalar(keys %copt)==0 && exists($o_cisco{'auto'}))) { + $oid_ciscofaultstatus[$i] = $cisco_port_linkfaultstatus_table . $cport[$i]; + } + } + if (defined($o_stp)) { + $oid_stpstate[$i] = $stp_dot1dbase_portstate . $stpport[$i] if $stpport[$i]; + } + # Put the performance oid + if (defined($o_perf) || defined($o_checkperf)) { + $oid_perf_inoct[$i]= $in_octet_table . $tindex[$i]; + $oid_perf_outoct[$i]= $out_octet_table . 
$tindex[$i]; + if (defined($o_ext_checkperf) || defined($o_perfe)) { + $oid_perf_indisc[$i]= $in_discard_table . $tindex[$i]; + $oid_perf_outdisc[$i]= $out_discard_table . $tindex[$i]; + $oid_perf_inerr[$i]= $in_error_table . $tindex[$i]; + $oid_perf_outerr[$i]= $out_error_table . $tindex[$i]; + } + } + if ($check_speed && (!defined($interfaces[$i]{'portspeed'}) || !defined($o_maxminsnmp))) { + $oid_speed[$i]=$speed_table . $tindex[$i]; + $oid_speed_high[$i]=$speed_table_64 . $tindex[$i]; + } + if (defined($o_commentoid)) { + if (defined($o_ciscocat) && defined($o_cisco{'show_portnames'})) { + $oid_commentlabel[$i]=$o_commentoid .'.'. $cport[$i] if $cport[$i]; + } + else { + $oid_commentlabel[$i]=$o_commentoid . $tindex[$i]; + } + } + } + + # put them all together and do as one query when -m option is used + if (defined($o_perf) || defined($o_checkperf) || defined($o_intspeed)) { + @oid_perf=(@oid_perf_outoct,@oid_perf_inoct,@oid_speed); + if (defined($o_highperf)) { + @oid_perf=(@oid_perf,@oid_speed_high); + } + if (defined ($o_ext_checkperf) || defined($o_perfe)) { + @oid_perf=(@oid_perf,@oid_perf_inerr,@oid_perf_outerr,@oid_perf_indisc,@oid_perf_outdisc); + } + } + if (defined($o_ciscocat)) { + @oid_ciscostatus=(@oid_ciscofaultstatus,@oid_ciscooperstatus,@oid_ciscoaddoperstatus); + } + if (defined($o_admindown_ok)) { + push @oids, @oids_admin if scalar(@oids_admin)>0; + } + if (defined($o_minsnmp)) { + push @oids, @oid_perf if scalar(@oid_perf)>0; + push @oids, @oid_descr if scalar(@oid_descr)>0; + push @oids, @oid_commentlabel if defined($o_commentoid) && scalar(@oid_commentlabel)>0; + push @oids, @oid_ciscostatus if defined($o_ciscocat) && scalar(@oid_ciscostatus)>0; + push @oids, @oid_stpstate if defined($o_stp) && scalar(@oid_stpstate)>0; + } + + # Get the requested oid values + snmp_get_request($session, \@oids, "status table", $results); + + # If not doing it as one query, do additional queries + # to get the perf value if -f (performance) option defined 
or -k (check bandwidth) + if ((defined($o_perf) || defined($o_checkperf) || defined($o_intspeed)) && !defined($o_minsnmp)) { + snmp_get_request($session, \@oid_perf, "statistics table", $results); + } + # and additional cisco status tables + if (defined($o_ciscocat) && !defined($o_minsnmp) && scalar(@oid_ciscostatus)>0) { + snmp_get_request($session, \@oid_ciscostatus, "cisco status tables", $results); + } + # and stp state table if --stp option is given + if (defined($o_stp) && !defined($o_minsnmp) && scalar(@oid_stpstate)>0) { + snmp_get_request($session, \@oid_stpstate, "stp state table", $results); + } + # and additional comments / port description table + if (defined($o_commentoid) && !defined($o_minsnmp) && scalar(@oid_commentlabel)>0) { + snmp_get_request($session, \@oid_commentlabel, "comments table", $results); + } + + $session->close; + + # Now go through the results and populate our interfaces array + for (my $i=0;$i<$num_int;$i++) { + $int_status_extratext=""; + + # First verify description is correct when -m option (but not --mm) is used + if (defined($o_minsnmp) && !defined($o_maxminsnmp)) { + my $dsc=undef; + if (defined($o_cisco{'use_portnames'})) { + $dsc=$results->{$descr_table.'.'. $cport[$i]} if $cport[$i]; + } + else { + $dsc=$results->{$descr_table.'.'. 
$tindex[$i]}; + } + $dsc = clean_int_name($dsc) if defined($dsc); + if (!defined($dsc) || $dsc ne $interfaces[$i]{'descr'}) { + # WL: Perhaps this is not quite right and there should be "goto" here forcing to retrieve all tables again + if (!defined($dsc)) { + printf("ERROR: Cached port description is %s while retrieved port name is not available\n", $interfaces[$i]{'descr'}); + } + else { + printf("ERROR: Cached port description %s is different then retrieved port name %s\n", $interfaces[$i]{'descr'}, $dsc); + } + exit $ERRORS{"UNKNOWN"}; + } + verb("Name : $dsc [confirmed cached name for port $i]"); + } + + # Admin and Oper Status + $interfaces[$i]{'admin_up'} = $results->{$admin_table.$tindex[$i]} if exists($results->{$admin_table.$tindex[$i]}); + $interfaces[$i]{'oper_up'} = $results->{$oper_table.$tindex[$i]} if exists($results->{$oper_table.$tindex[$i]}); + + # IN and OUT traffic counters, Errors and Dropped counters + if (defined($results->{$oid_perf_inoct[$i]}) && defined($results->{$oid_perf_outoct[$i]})) { + $interfaces[$i]{'in_bytes'}=$results->{$oid_perf_inoct[$i]}; + $interfaces[$i]{'out_bytes'}=$results->{$oid_perf_outoct[$i]}; + $interfaces[$i]{'in_errors'}=0; + $interfaces[$i]{'out_errors'}=0; + $interfaces[$i]{'in_dropped'}=0; + $interfaces[$i]{'out_dropped'}=0; + if (defined($o_ext_checkperf)) { # Add other values (error & disc) + $interfaces[$i]{'in_errors'}=$results->{$oid_perf_inerr[$i]} if defined($results->{$oid_perf_inerr[$i]}); + $interfaces[$i]{'out_errors'}=$results->{$oid_perf_outerr[$i]} if defined($results->{$oid_perf_outerr[$i]}); + $interfaces[$i]{'in_dropped'}=$results->{$oid_perf_indisc[$i]} if defined($results->{$oid_perf_indisc[$i]}); + $interfaces[$i]{'out_dropped'}=$results->{$oid_perf_outdisc[$i]} if defined($results->{$oid_perf_outdisc[$i]}); + } + } + + # Additional description data / Comments OID + if (defined($o_commentoid)) { + if (defined($o_cisco{'show_portnames'})) { + $interfaces[$i]{'descr_extra'} 
='('.$results->{$o_commentoid.'.'.$cport[$i]}.')' if $cport[$i] && $results->{$o_commentoid.'.'.$cport[$i]}; + } + else { + $interfaces[$i]{'descr_extra'} ='('.$results->{$o_commentoid.$tindex[$i]}.')' if $results->{$o_commentoid.$tindex[$i]}; + } + } + + # Cisco status data + if (defined($o_ciscocat) && $cport[$i]) { + my ($int_status_cisco,$operstat,$addoperstat)=(undef,undef,undef); + my $cisco_status_extratext=""; + + if (exists($o_cisco{'linkfault'}) || exists($copt{'linkfault'}) || + (scalar(keys %copt)==0 && exists($o_cisco{'auto'}))) { + if (defined($results->{$cisco_port_linkfaultstatus_table.$cport[$i]})) { + $int_status_cisco=$results->{$cisco_port_linkfaultstatus_table.$cport[$i]}; + if (defined($int_status_cisco) && $int_status_cisco !~ /\d+/) { + verb("Received non-integer value for cisco linkfault status when checking port $i: $int_status_cisco"); + $int_status_cisco=undef; + } + if (defined($int_status_cisco) && $int_status_cisco!=1) { + $cisco_status_extratext.=',' if $cisco_status_extratext; + $cisco_status_extratext.=$cisco_port_linkfaultstatus{$int_status_cisco}; + } + } + if (defined($int_status_cisco) && ( + (!defined($o_inverse) && $int_status_cisco!=1) || (defined($o_inverse) && $int_status_cisco==1))) { + $interfaces[$i]{'nagios_status'}=$ERRORS{'CRITICAL'}; + } + } + if (exists($o_cisco{'oper'}) || exists($copt{'oper'}) || + (scalar(keys %copt)==0 && exists($o_cisco{'auto'}))) { + if (defined($results->{$cisco_port_operstatus_table.$cport[$i]})) { + $operstat=$results->{$cisco_port_operstatus_table.$cport[$i]}; + if (defined($operstat) && $operstat !~ /\d+/) { + verb("Received non-integer value for cisco operport status when checking port $i: $operstat"); + $operstat=undef; + } + if (defined($operstat) && $operstat!=2) { + $cisco_status_extratext.=',' if $cisco_status_extratext; + $cisco_status_extratext.=$cisco_port_operstatus{$operstat}; + } + } + if (defined($operstat) && ( + (!defined($o_inverse) && $operstat!=2) || 
(defined($o_inverse) && $operstat==2))) { + $interfaces[$i]{'nagios_status'}=$ERRORS{'CRITICAL'}; + } + } + if (exists($o_cisco{'addoper'}) || exists($copt{'addoper'}) || + (scalar(keys %copt)==0 && exists($o_cisco{'auto'}))) { + if (defined($results->{$cisco_port_addoperstatus_table.$cport[$i]})) { + $addoperstat=$results->{$cisco_port_addoperstatus_table.$cport[$i]}; + } + if (defined($addoperstat) && ($addoperstat eq 'noSuchInstance' || $addoperstat eq 'noSuchObject')) { + verb("Received invalid value for cisco addoper status when checking port $i: $addoperstat"); + $addoperstat=undef; + } + if (defined($addoperstat)) { + if ($addoperstat !~ /0x.*/) {$addoperstat = hex ascii_to_hex($addoperstat);} else {$addoperstat = hex $addoperstat;} + for (my $j=0; $j<=15;$j++) { # WL: SNMP BITS type - yak! + if ($addoperstat & (1<<(15-$j))) { + $cisco_status_extratext.=',' if $cisco_status_extratext; + $cisco_status_extratext.=$cisco_port_addoperstatus{$j} if $cisco_port_addoperstatus{$j} ne 'connected'; + } + } + } + } + if (scalar(keys %copt)==0 && exists($o_cisco{'auto'})) { + $copt_next{'linkfault'}=1 if defined($int_status_cisco); + $copt_next{'oper'}=1 if defined($operstat); + $copt_next{'addoper'}=1 if defined($addoperstat); + } + if ($cisco_status_extratext) { + $int_status_extratext.=", " if $int_status_extratext; + $int_status_extratext.="CISCO: ".$cisco_status_extratext; + } + } + + # STP state data + if (defined($o_stp) && $stpport[$i]) { + my ($int_stp_state,$prev_stp_state,$prev_stp_changetime)=(undef,undef,undef); + $int_stp_state=$results->{$stp_dot1dbase_portstate.$stpport[$i]}; + if ($int_stp_state !~ /\d+/) { + verb("Received non-numeric status for STP for port $i: $int_stp_state"); + $int_stp_state=undef; + } + $prev_stp_state=prev_perf($interfaces[$i]{'descr'},"stp_state"); + $prev_stp_changetime=prev_perf($interfaces[$i]{'descr'},"stp_changetime"); + if (defined($int_stp_state)) { + $int_status_extratext.=',' if $int_status_extratext; + 
$int_status_extratext.='STP:'.$stp_portstate{$int_stp_state}; + $perf_out .= " ".perf_name($interfaces[$i]{'descr'},"stp_state")."=".$int_stp_state; + $perf_out .= " ".perf_name($interfaces[$i]{'descr'},"prev_stp_state")."=".$prev_stp_state if defined($prev_stp_state); + if (defined($prev_stp_changetime) && defined($prev_stp_state) && $prev_stp_state == $int_stp_state) { + $perf_out .= " ".perf_name($interfaces[$i]{'descr'},'stp_changetime').'='.$prev_stp_changetime; + } + elsif (!defined($prev_stp_state) || !defined($prev_stp_changetime)) { + $perf_out .= " ".perf_name($interfaces[$i]{'descr'},'stp_changetime').'='.($timenow-$stp_warntime); + } + else { + $perf_out .= " ".perf_name($interfaces[$i]{'descr'},'stp_changetime').'='.$timenow; + } + if ($o_stp ne '' && $int_stp_state != $stp_portstate_reverse{$o_stp}) { + $int_status_extratext.=":CRIT"; + $interfaces[$i]{'nagios_status'} = $ERRORS{'CRITICAL'}; + } + elsif ((defined($prev_stp_changetime) && ($timenow-$prev_stp_changetime)<$stp_warntime) || + (defined($prev_stp_state) && $prev_stp_state != $int_stp_state)) { + $int_status_extratext.=":WARN(change from ". 
+ $stp_portstate{$prev_stp_state}.")"; + $interfaces[$i]{'nagios_status'} = $ERRORS{'WARNING'}; + } + } + } + + # Get the speed in normal or highperf speed counters + if (defined($oid_speed[$i]) && defined($results->{$oid_speed[$i]})) { + if ($results->{$oid_speed[$i]} == 4294967295) { # Too high for this counter (cf IF-MIB) + if (!defined($o_highperf) && $check_speed) { + print "Cannot get interface speed with standard MIB, use highperf mib (-g) : UNKNOWN\n"; + exit $ERRORS{"UNKNOWN"} + } + if (defined ($results->{$oid_speed_high[$i]}) && $results->{$oid_speed_high[$i]} != 0) { + $interfaces[$i]{'portspeed'}=$results->{$oid_speed_high[$i]} * 1000000; + } + elsif ($specified_speed==0) { + print "Cannot get interface speed using highperf mib : UNKNOWN\n"; + exit $ERRORS{"UNKNOWN"} + } + } else { + $interfaces[$i]{'portspeed'}=$results->{$oid_speed[$i]}; + } + } + + # Finished with getting data. Now save extra text for interface status info + if ($int_status_extratext ne '') { + $interfaces[$i]{'status_extratext'} = '' if !exists($interfaces[$i]{'status_extratext'}); + $interfaces[$i]{'status_extratext'} .= ', ' if $interfaces[$i]{'status_extratext'}; + $interfaces[$i]{'status_extratext'} .= $int_status_extratext; + } + + # Prepare index table and desc data for performance output / saved data caching and reuse + if (defined($o_minsnmp) && defined($o_prevperf)) { + @descr=(); + @portspeed=(); + for(my $iii=0;$iii0; + $saved_out.= " cache_descr_names=".join(',',@descr) if scalar(@descr)>0; + $saved_out.= " cache_descr_time=".$perfcache_time if defined($perfcache_time); + $saved_out.= " cache_int_speed=". 
join(',',@portspeed) if $check_speed && scalar(@portspeed)>0 && defined($o_maxminsnmp) && $specified_speed==0; + if (defined($o_ciscocat)) { + $cport[0]=-1 if scalar(@cport)==0; + $saved_out.= " cache_descr_cport=".join(',',@cport); + if (scalar(keys %copt)>0) { + $saved_out.= " cache_cisco_opt=".join(',',keys %copt); + } + elsif (scalar(keys %copt_next)>0) { + $saved_out.= " cache_cisco_opt=".join(',',keys %copt_next); + } + } + if (defined($o_stp)) { + $stpport[0]=-1 if scalar(@stpport)==0; + $saved_out.= " cache_descr_stpport=".join(',',@stpport); + } + } + } + } + + return $results; +} + +########## MAIN ####### + +check_options(); + +# Check gobal timeout if snmp screws up +if (defined($TIMEOUT)) { + verb("Alarm at $TIMEOUT + 5"); + alarm($TIMEOUT+5); +} else { + verb("no timeout defined : $o_timeout + 10"); + alarm ($o_timeout+10); +} + +$SIG{'ALRM'} = sub { + if (defined($o_host)) { + print "ERROR: alarm timeout. No answer from host $o_host\n"; + } + else { + print "ERROR: alarm timeout\n"; + kill 9, $shell_pid if defined($shell_pid); + } + exit $ERRORS{"UNKNOWN"}; +}; + +#Select interface by regexp of exact match +verb("Filter : $o_descr") if defined($o_descr); + +# WL: check if '-m' option is passed and previous description ids & names are available from +# previous performance data (caching to minimize SNMP lookups and only get specific data +# instead of getting description table every time) +$perfcache_time = prev_perf('cache_descr_time') if defined(prev_perf('cache_descr_time')); + +# Get data from local machine about its interfaces or from SNMP +if ($do_snmp==0) { + getdata_localhost(); +} +else { + getdata_snmp(); +} + +if ($num_int == 0) { + if (defined($o_descr)) { + print "ERROR : Unknown interface $o_descr\n"; + } + else { + print "ERROR : can not find any network interfaces\n"; + } + exit $ERRORS{"UNKNOWN"}; +} + +# some more global variables, some should possibly move further up to main vars definition +my $num_ok=0; +my $num_admindown=0; +my 
$ok_val= defined ($o_inverse) ? 2 : 1; # define the OK value depending on -i option +my $final_status = 0; +my $print_out=''; +my $temp_file_name; +my @prev_values=(); +my @checkperf_out_raw=undef; +my @checkperf_out=undef; +my $checkval_out=undef; +my $checkval_in=undef; +my $checkval_tdiff=undef; +my $usable_data=0; +my $n_rows=0; +my $n_items_check=(defined($o_ext_checkperf))?7:3; +my $trigger=$timenow - ($o_delta - ($o_delta/4)); +my $trigger_low=$timenow - 4*$o_delta; +my $old_value=undef; +my $old_time=undef; +my $speed_unit=undef; +my $speed_metric=undef; + +# make all checks and output for all interfaces +for (my $i=0;$i < $num_int; $i++) { + $print_out.=", " if ($print_out); + $perf_out .= " " if ($perf_out); + my $usable_data=1; # 0 is OK, 1 means its not OK + + # Get the status of the current interface + my $int_status = $ok_val; + my $admin_int_status = $ok_val; + my $int_status_extratext = ""; + if (!defined($o_ignorestatus)) { + if (!exists($interfaces[$i]{'up_status'})) { + if (defined($o_admin) && exists($interfaces[$i]{'admin_up'})) { + $interfaces[$i]{'up_status'} = $interfaces[$i]{'admin_up'}; + } + elsif (exists($interfaces[$i]{'oper_up'})) { + $interfaces[$i]{'up_status'} = $interfaces[$i]{'oper_up'}; + } + else { + printf("ERROR: Can not find up status for interface ".$interfaces[$i]{'descr'}); + exit $ERRORS{"UNKNOWN"}; + } + } + if (defined($o_admindown_ok) && exists($interfaces[$i]{'admin_up'})) { + $admin_int_status = $interfaces[$i]{'admin_up'}; + $int_status_extratext.="ADMIN:".$status_print{$admin_int_status} if $admin_int_status ne $interfaces[$i]{'up_status'}; + } + $int_status = $interfaces[$i]{'up_status'} if exists($interfaces[$i]{'up_status'}); + } + if (exists($interfaces[$i]{'status_extratext'})) { + $int_status_extratext.=", " if $int_status_extratext; + $int_status_extratext.=$interfaces[$i]{'status_extratext'}; + } + + # Get the final output interface description ready + my $int_desc=""; + my $descr=$interfaces[$i]{'descr'}; 
+ if (defined ($o_short)) { + if ($o_short < 0) { + $int_desc=substr($descr,$o_short); + } + else { + $int_desc=substr($descr,0,$o_short); + } + } + else { + $int_desc = $descr; + } + if (exists($interfaces[$i]{'descr_extra'})) { + $int_desc .= $interfaces[$i]{'descr_extra'}; + } + $interfaces[$i]{'full_descr'}=$int_desc; + + # Interface Speed + if ($specified_speed!=0 && !exists($interfaces[$i]{'portspeed'})) { + $interfaces[$i]{'portspeed'} = $specified_speed; + } + if (defined($speed_alert) && defined($interfaces[$i]{'portspeed'}) && $interfaces[$i]{'portspeed'}!=$specified_speed) { + $int_status_extratext.=',' if $int_status_extratext; + $int_status_extratext.="$speed_alert: Speed=".$interfaces[$i]{'portspeed'}." bps"; + $int_status_extratext.=" (should be $specified_speed bps)"; + $interfaces[$i]{'nagios_status'}=$ERRORS{$speed_alert} if !exists($interfaces[$i]{'nagios_status'}) || $ERRORS{$speed_alert} > $interfaces[$i]{'nagios_status'}; + } + verb ("Interface $i speed : ".$interfaces[$i]{'portspeed'}) if defined($interfaces[$i]{'portspeed'}); + + $final_status = $interfaces[$i]{'nagios_status'} if exists($interfaces[$i]{'nagios_status'}) && $final_status < $interfaces[$i]{'nagios_status'}; + + # Make the bandwidth & error checks if necessary + if (defined ($o_checkperf) && $int_status==$status{'UP'}) { + + # WL: checks if previous performance data & time last check was run are available + if ($o_filestore || !defined($o_prevperf)) { + if ($o_filestore && length($o_filestore)>1 && ! -d $o_filestore) { + $temp_file_name = $o_filestore; + } + else { + $temp_file_name = $descr; + $temp_file_name =~ s/[ ;\/]/_/g; + $temp_file_name = (length($o_filestore)>1 && -d $o_filestore ? $o_filestore : $o_base_dir) . (defined($o_host)?$o_host.".":"") . 
$temp_file_name; + } + # First, read entire file + my @ret_array=read_file($temp_file_name,$n_items_check); + $usable_data = shift(@ret_array); + $n_rows = shift(@ret_array); + if ($n_rows != 0) { @prev_values = @ret_array }; + verb ("File read returns : $usable_data with $n_rows rows"); + } + # WL: if one or more sets of previous performance data is available + # then put it in prev_values array and use as history data + # [TODO] this code is still a bit buggy as far as checking for bad + # or missing values in previous performance data + else { + my $j=0; + my $jj=0; + my $data_ok; + my $timeref=''; + for (;$j<$o_pcount && exists($prev_time[$j]); $j++) { + $data_ok=1; + $timeref='.'.$prev_time[$j]; + $timeref='' if prev_perf('ptime') eq $prev_time[$j]; + $prev_values[$jj]=[ $prev_time[$j], + prev_perf($descr,'in_octet'.$timeref), + prev_perf($descr,'out_octet'.$timeref), + prev_perf($descr,'in_error'.$timeref), + prev_perf($descr,'out_error'.$timeref), + prev_perf($descr,'in_discard'.$timeref), + prev_perf($descr,'out_discard'.$timeref) ]; + # this checks if data is ok and not, this set of values would not be used + # and next set put in its place as $jj is not incrimented + for (my $k=1;$k<(defined($o_ext_checkperf)?7:3);$k++) { + if (!defined($prev_values[$jj][$k]) || $prev_values[$jj][$k] !~ /\d+/) { + $prev_values[$jj][$k]=0; + $data_ok=0 if $k<3; + } + } + if ($data_ok && $prev_values[$jj][1]!=0 && $prev_values[$jj][2]!=0) { + $jj++; + } + else { + $prev_values[$jj][0]=0; + } + } + $n_rows = $jj; + if ($jj==0) { $usable_data=1 } #NAK + else { $usable_data=0; } # OK + } + verb("Previous data array created: $n_rows rows"); + + # Put the new values in the array + if (defined($interfaces[$i]{'in_bytes'}) && defined($interfaces[$i]{'out_bytes'})) { + $prev_values[$n_rows]=[ $timenow, $interfaces[$i]{'in_bytes'}, $interfaces[$i]{'out_bytes'}, + $interfaces[$i]{'in_errors'}, $interfaces[$i]{'out_errors'}, + $interfaces[$i]{'in_dropped'}, 
$interfaces[$i]{'out_dropped'} ]; + $n_rows++; + } + #make the checks if the file is OK + if ($usable_data==0) { + my $j; + my $jj=0; + my $n=0; + my $overfl; + @checkperf_out=(0,0,0,0,0,0); + @checkperf_out_raw=(); + $checkval_in=undef; + $checkval_out=undef; + $checkval_tdiff=undef; + + # check if the counter is back to 0 after 2^32 / 2^64. + # First set the modulus depending on highperf counters or not + my $overfl_mod = defined ($o_highperf) ? 18446744073709551616 : 4294967296; + + # Define the speed metric ( K | M | G ) (Bits|Bytes) or % + if (defined($o_prct)) { # in % of speed + # Speed is in bits/s, calculated speed is in Bytes/s + if (defined($interfaces[$i]{'portspeed'})) { + $speed_metric=$interfaces[$i]{'portspeed'}/800; + $speed_unit='%'; + } + } else { + if (defined($o_kbits)) { # metric in bits + if (defined($o_meg)) { # in Mbit/s = 1000000 bit/s + $speed_metric=125000; # (1000/8) * 1000 + $speed_unit="Mbps"; + } elsif (defined($o_gig)) { # in Gbit/s = 1000000000 bit/s + $speed_metric=125000000; # (1000/8) * 1000 * 1000 + $speed_unit="Gbps"; + } else { # in Kbits + $speed_metric=125; # ( 1000/8 ) + $speed_unit="Kbps"; + } + } else { # metric in byte + if (defined($o_meg)) { # in Mbits + $speed_metric=1048576; # 1024^2 + $speed_unit="MBps"; + } elsif (defined($o_gig)) { # in Mbits + $speed_metric=1073741824; # 1024^3 + $speed_unit="GBps"; + } else { + $speed_metric=1024; # 1024^1 + $speed_unit="KBps"; + } + } + } + + # Calculate averages & metrics + $j=$n_rows-1; + do { + if ($prev_values[$j][0] < $trigger) { + if ($prev_values[$j][0] > $trigger_low) { + if (($checkval_tdiff=$prev_values[$j+1][0]-$prev_values[$j][0])!=0) { + # check_perf_out_raw is array used to store calculations from multiple counts + $checkperf_out_raw[$jj] = [ 0,0,0,0,0 ]; + + # Check counter (s) + if ($prev_values[$j+1][1]!=0 && $prev_values[$j][1]!=0) { + $overfl = ($prev_values[$j+1][1] >= $prev_values[$j][1] ) ? 
0 : $overfl_mod; + $checkval_in = ($overfl + $prev_values[$j+1][1] - $prev_values[$j][1]) / $checkval_tdiff ; + $checkperf_out_raw[$jj][0] = $checkval_in / $speed_metric; + } + if ($prev_values[$j+1][2]!=0 && $prev_values[$j][2]!=0) { + $overfl = ($prev_values[$j+1][2] >= $prev_values[$j][2] ) ? 0 : $overfl_mod; + $checkval_out = ($overfl + $prev_values[$j+1][2] - $prev_values[$j][2]) / $checkval_tdiff; + $checkperf_out_raw[$jj][1] = $checkval_out / $speed_metric; + } + if (defined($o_ext_checkperf)) { + $checkperf_out_raw[$jj][2] = ( ($prev_values[$j+1][3] - $prev_values[$j][3])/ $checkval_tdiff )*60; + $checkperf_out_raw[$jj][3] = ( ($prev_values[$j+1][4] - $prev_values[$j][4])/ $checkval_tdiff )*60; + $checkperf_out_raw[$jj][4] = ( ($prev_values[$j+1][5] - $prev_values[$j][5])/ $checkval_tdiff )*60; + $checkperf_out_raw[$jj][5] = ( ($prev_values[$j+1][6] - $prev_values[$j][6])/ $checkval_tdiff )*60; + } + $jj++ if $checkperf_out_raw[$jj][0]!=0 || $checkperf_out_raw[$jj][1]!=0; + } + } + } + $j--; + } while ( $j>=0 && $jj<$o_pcount ); + + # Calculate total as average + if ($jj>0) { + for (my $k=0;$k<5;$k++) { + $n=0; + for ($j=0;$j<$jj;$j++) { + if ($checkperf_out_raw[$j][$k]!=0) { + $n++; + $checkperf_out[$k]+=$checkperf_out_raw[$j][$k]; + } + } + if ($n>0) { + $checkperf_out[$k]=$checkperf_out[$k]/$n; + } + } + } + else { + $usable_data=1; + } + } + + # WL: modified to not write the file if both -P and -T options are used + # -z option used: don't write the tmp file + if (defined($temp_file_name) && !defined($o_zerothresholds) && ($o_filestore || !$o_prevperf || !$o_prevtime)) { + if (($_=write_file($temp_file_name,$n_rows,$n_items_check,@prev_values))!=0) { + $final_status=3; + $print_out.= " !!Unable to write file ".$temp_file_name." !! 
"; + verb ("Write file returned : $_"); + } + } + # Print the basic status + $print_out.=sprintf("%s:%s",$int_desc, $status_print{$int_status}); + $print_out.=' ['.$int_status_extratext.']' if $int_status_extratext; + # print the other checks if it was calculated + if ($usable_data==0 && defined($checkperf_out[0])) { + $print_out.= " ("; + # check 2 or 6 values depending on ext_check_perf + my $num_checkperf=(defined($o_ext_checkperf))?6:2; + for (my $l=0;$l < $num_checkperf;$l++) { + # Set labels if needed + $checkperf_out_desc= (defined($o_label)) ? $countername[$l] : ""; + verb("Interface $i, threshold check $l : $checkperf_out[$l]"); + $print_out.="/" if $l!=0; + if ((defined($o_crit_max[$l]) && $o_crit_max[$l] && ($checkperf_out[$l]>$o_crit_max[$l])) || + (defined($o_crit_min[$l]) && $o_crit_min[$l] && ($checkperf_out[$l]<$o_crit_min[$l]))) { + $final_status=2; + $print_out.= sprintf("CRIT %s%.1f",$checkperf_out_desc,$checkperf_out[$l]); + } elsif ((defined($o_warn_max[$l]) && $o_warn_max[$l] && ($checkperf_out[$l]>$o_warn_max[$l])) || + (defined($o_warn_min[$l]) && $o_warn_min[$l] && ($checkperf_out[$l]<$o_warn_min[$l]))) { + $final_status=($final_status==2)?2:1; + $print_out.= sprintf("WARN %s%.1f",$checkperf_out_desc,$checkperf_out[$l]); + } else { + $print_out.= sprintf("%s%.1f",$checkperf_out_desc,$checkperf_out[$l]); + } + $print_out.= $speed_unit if defined($speed_unit) && ($l==0 || $l==1); + } + $print_out .= ")"; + } + elsif (!defined($o_zerothresholds)) { + $print_out.= " (no usable data - ".$n_rows." 
rows) "; + # WL: I've removed return of UNKNOWN if no data is available, though when plugin is first run that may still happen + # $final_status=3; + } + } + else { + $print_out.=sprintf("%s:%s",$int_desc, $status_print{$int_status}); + $print_out.=' ['.$int_status_extratext.']' if $int_status_extratext; + } + if ((($int_status == $ok_val) || (defined($o_dormant) && $int_status == $status{'DORMANT'}))) { + # used to also be: "&& $int_status_opt==0" but I removed it in 2.4 so only main (admin/oper) status is counted for up/down + $num_ok++; + } elsif (defined($o_admindown_ok) && $ok_val==1 && !$o_admin && $int_status == $status{'DOWN'} && $admin_int_status == $status{'DOWN'}) { + # we want to check for operational UP interfaces, so don't check those that are supposed to be down (administratively down) + $num_admindown++; + } + # Don't return performance data for interfaces that are down and are supposed to be down + if (!(defined($o_admindown_ok) && $ok_val==1 && $int_status == $status{'DOWN'} && $admin_int_status == $status{'DOWN'}) && defined($interfaces[$i]{'descr'}) && (defined($o_perf) || defined($o_intspeed) || defined($o_perfr) || defined($o_perfp) || defined($o_checkperf))) { + if (defined ($o_perfp)) { # output in % of speed + if ($usable_data==0 && defined($checkperf_out[0]) && defined($checkperf_out[1])) { + if (defined($o_prct)) { + $perf_out .= " ".perf_name($descr,"in_prct")."="; + $perf_out .= sprintf("%.0f",$checkperf_out[0]) . '%;'; + $perf_out .= (defined($o_warn_max[0]) && $o_warn_max[0]) ? $o_warn_max[0] . ";" : ";"; + $perf_out .= (defined($o_crit_max[0]) && $o_crit_max[0]) ? $o_crit_max[0] . ";" : ";"; + $perf_out .= "0;100 "; + $perf_out .= " ".perf_name($descr,"out_prct")."="; + $perf_out .= sprintf("%.0f",$checkperf_out[1]) . '%;'; + $perf_out .= (defined($o_warn_max[1]) && $o_warn_max[1]) ? $o_warn_max[1] . ";" : ";"; + $perf_out .= (defined($o_crit_max[1]) && $o_crit_max[1]) ? $o_crit_max[1] . 
";" : ";"; + $perf_out .= "0;100 "; + } + elsif (defined($interfaces[$i]{'portspeed'}) && $interfaces[$i]{'portspeed'} != 0) { + $perf_out .= " ".perf_name($descr,"in_prct")."="; + $perf_out .= sprintf("%.0f", $checkperf_out[0]*$speed_metric/$interfaces[$i]{'portspeed'}*800). '%'; + $perf_out .= " ".perf_name($descr,"out_prct")."="; + $perf_out .= sprintf("%.0f", $checkperf_out[1]*$speed_metric/$interfaces[$i]{'portspeed'}*800). '%'; + } + else { + verb("we do not have information on speed of interface $i (".$interfaces[$i]{'descr'}.")"); + } + } + } elsif (defined ($o_perfr)) { # output in bites or Bytes /s + if ($usable_data==0) { + if (defined($o_kbits)) { # bps + # put warning and critical levels into bps or Bps + my $warn_factor=undef; + if (defined($o_prct)) { # warn&crit in % -> put warn_factor to 1% of speed in bps + $warn_factor=$interfaces[$i]{'portspeed'}/100 if defined($interfaces[$i]{'portspeed'}); + } else { # just convert from K|M|G bps + $warn_factor = (defined($o_meg)) ? 1000000 : (defined($o_gig)) ? 1000000000 : 1000; + } + if (defined($warn_factor)) { + $perf_out .= " ".perf_name($descr,"in_bps")."="; + $perf_out .= sprintf("%.0f",$checkperf_out[0] * 8 * $speed_metric) .";" if defined($checkperf_out[0]); + $perf_out .= (defined($o_warn_max[0]) && $o_warn_max[0]) ? $o_warn_max[0]*$warn_factor . ";" : ";"; + $perf_out .= (defined($o_crit_max[0]) && $o_crit_max[0]) ? $o_crit_max[0]*$warn_factor . ";" : ";"; + $perf_out .= "0;". $interfaces[$i]{'portspeed'} ." " if defined($interfaces[$i]{'portspeed'}); + $perf_out .= " ".perf_name($descr, "out_bps"). "="; + $perf_out .= sprintf("%.0f",$checkperf_out[1] * 8 * $speed_metric) .";" if defined($checkperf_out[1]); + $perf_out .= (defined($o_warn_max[1]) && $o_warn_max[1]) ? $o_warn_max[1]*$warn_factor . ";" : ";"; + $perf_out .= (defined($o_crit_max[1]) && $o_crit_max[1]) ? $o_crit_max[1]*$warn_factor . ";" : ";"; + $perf_out .= "0;". $interfaces[$i]{'portspeed'} ." 
" if defined($interfaces[$i]{'portspeed'}); + } + } else { # Bps + my $warn_factor = undef; + if (defined($o_prct)) { # warn&crit in % -> put warn_factor to 1% of speed in Bps + $warn_factor=$interfaces[$i]{'portspeed'}/800 if defined($interfaces[$i]{'portspeed'}); + } else { # just convert from K|M|G bps + $warn_factor = (defined($o_meg)) ? 1048576 : (defined($o_gig)) ? 1073741824 : 1024; + } + if (defined($warn_factor)) { + $perf_out .= " ".perf_name($descr,"in_Bps")."=" . sprintf("%.0f",$checkperf_out[0] * $speed_metric) .";" if defined($checkperf_out[0]); + $perf_out .= (defined($o_warn_max[0]) && $o_warn_max[0]) ? $o_warn_max[0]*$warn_factor . ";" : ";"; + $perf_out .= (defined($o_crit_max[0]) && $o_crit_max[0]) ? $o_crit_max[0]*$warn_factor . ";" : ";"; + $perf_out .= "0;". $interfaces[$i]{'portspeed'} / 8 ." " if defined($interfaces[$i]{'portspeed'}); + $perf_out .= " ".perf_name($descr,"out_Bps")."=" . sprintf("%.0f",$checkperf_out[1] * $speed_metric) .";" if defined($checkperf_out[1]); + $perf_out .= (defined($o_warn_max[1]) && $o_warn_max[1]) ? $o_warn_max[1]*$warn_factor . ";" : ";"; + $perf_out .= (defined($o_crit_max[1]) && $o_crit_max[1]) ? $o_crit_max[1]*$warn_factor . ";" : ";"; + $perf_out .= "0;". $interfaces[$i]{'portspeed'} / 8 ." " if defined($interfaces[$i]{'portspeed'}); + } + } + } + } + # output in octet counter + if (defined($o_perfo) || (defined($o_prevperf) && !defined($o_nagios_saveddata))) { + # we add 'c' for graphing programs to know its a COUNTER + $perf_out .= " ".perf_name($descr,"in_octet")."=". $interfaces[$i]{'in_bytes'}."c"; + $perf_out .= " ".perf_name($descr,"out_octet")."=". $interfaces[$i]{'out_bytes'}."c"; + } + if (defined($o_prevperf) && defined($o_nagios_saveddata)) { + # we don't need to add 'c' if saved data is separate from perfdata + $saved_out .= " ".perf_name($descr,"in_octet")."=". $interfaces[$i]{'in_bytes'}; + $saved_out .= " ".perf_name($descr,"out_octet")."=". 
$interfaces[$i]{'out_bytes'}; + } + if (defined ($o_perfe) && defined($o_ext_checkperf)) { + # these are techinically counters too, but its better to have them graphed as total + $perf_out .= " ".perf_name($descr,"in_error")."=". $interfaces[$i]{'in_errors'}; + $perf_out .= " ".perf_name($descr,"out_error")."=". $interfaces[$i]{'out_errors'}; + $perf_out .= " ".perf_name($descr,"in_discard")."=". $interfaces[$i]{'in_dropped'} if defined ($interfaces[$i]{'in_dropped'}); + $perf_out .= " ".perf_name($descr,"out_discard")."=". $interfaces[$i]{'out_dropped'} if defined ($interfaces[$i]{'out_dropped'}); + } + if (defined($interfaces[$i]{'portspeed'}) && defined($o_perf) && defined($o_intspeed)) { + $perf_out .= " ".perf_name($descr,"speed_bps")."=".$interfaces[$i]{'portspeed'}; + } + } +} + +# Add additional sets of previous performance data +# do it at the very end so that if nagios does cut performance data +# due to limits in its buffer space then what is cut is part of this data +my ($pcount,$loop_time); +if (defined($o_prevperf) && $o_pcount>0) { + for (my $i=0; $i<$num_int; $i++) { + $pcount=0; + foreach $loop_time (reverse sort(@prev_time)) { + if (defined($interfaces[$i]{'descr'}) && $pcount<($o_pcount-1)) { + my $timeref='.'.$loop_time; + $timeref='' if defined(prev_perf('ptime')) && prev_perf('ptime') eq $loop_time; + if (defined(prev_perf($interfaces[$i]{'descr'},'in_octet'.$timeref)) && + defined(prev_perf($interfaces[$i]{'descr'},'in_octet'.$timeref))) { + $saved_out .= " ".perf_name($interfaces[$i]{'descr'},'in_octet.'.$loop_time).'='.prev_perf($interfaces[$i]{'descr'},'in_octet'.$timeref); + $saved_out .= " ".perf_name($interfaces[$i]{'descr'},'out_octet.'.$loop_time).'='.prev_perf($interfaces[$i]{'descr'},'out_octet'.$timeref); + } + if (defined ($o_perfe) && + defined(prev_perf($interfaces[$i]{'descr'},'in_error'.$timeref)) && + defined(prev_perf($interfaces[$i]{'descr'},'out_error'.$timeref)) && + 
defined(prev_perf($interfaces[$i]{'descr'},'in_discard'.$timeref)) && + defined(prev_perf($interfaces[$i]{'descr'},'out_discard'.$timeref))) { + $saved_out .= " ".perf_name($interfaces[$i]{'descr'},'in_error.'.$loop_time).'='.prev_perf($interfaces[$i]{'descr'},'in_error'.$timeref); + $saved_out .= " ".perf_name($interfaces[$i]{'descr'},'out_error.'.$loop_time).'='.prev_perf($interfaces[$i]{'descr'},'out_error'.$timeref); + $saved_out .= " ".perf_name($interfaces[$i]{'descr'},'in_discard.'.$loop_time).'='.prev_perf($interfaces[$i]{'descr'},'in_discard'.$timeref); + $saved_out .= " ".perf_name($interfaces[$i]{'descr'},'out_discard.'.$loop_time).'='.prev_perf($interfaces[$i]{'descr'},'out_discard'.$timeref); + } + $pcount++; + } + } + } + $saved_out .= " ptime=".$timenow; +} + +# Only a few ms left... +alarm(0); + +# WL: partially rewritten these last steps to minimize amount of code +# Check if all interface are OK +my $exit_status="UNKNOWN"; +if (($num_ok == $num_int) || (defined($o_admindown_ok) && $num_ok+$num_admindown == $num_int) ) { + $exit_status="OK" if $final_status==0; + $exit_status="WARNING" if $final_status==1; + $exit_status="CRITICAL" if $final_status==2; + if (defined($o_admindown_ok)) { + print $print_out," ($num_ok UP, $num_admindown ADMIN DOWN): $exit_status"; + } else { + print $print_out," ($num_ok UP): $exit_status"; + } +} +# print the not OK interface number and exit (return is always critical if at least one int is down) +else { + $exit_status="CRITICAL"; + print $print_out,": ", $num_int-$num_ok-$num_admindown, " int NOK : CRITICAL"; +} +print " | ",$perf_out if defined($perf_out) && $perf_out; +if (defined($saved_out) && $saved_out) { + print " ||" if defined($o_nagios_saveddata); + print $saved_out; +} +print "\n"; +exit $ERRORS{$exit_status}; diff --git a/nagios/files/check_postgresql_backup b/nagios/files/check_postgresql_backup new file mode 100644 index 00000000..43401db5 --- /dev/null +++ b/nagios/files/check_postgresql_backup @@ 
-0,0 +1,38 @@ +#!/bin/bash + +TIMESTAMP=$( date +%s ) +ONEDAY=86400 +RETVAL=0 +MSG= +if [ -f /etc/default/pg_backup ] ; then + . /etc/default/pg_backup +else + BACKUPDIR=/var/lib/pgsql/backups +fi +TIMESTAMP_LOG=$BACKUPDIR/.timestamp +NAGIOS_LOG=$BACKUPDIR/.nagios-status + +function check_db_freshness() { + DB_LASTRUN=$( cat $TIMESTAMP_LOG ) + FRESHNESS=$( echo "$TIMESTAMP - $DB_LASTRUN" | bc ) + if [ $FRESHNESS -gt $ONEDAY ] ; then + MSG_FRESH="Dump older than 1 day ; " + RETVAL=1 + fi +} + +function check_dump_status() { + MSG_STATUS=$( grep FAILED $NAGIOS_LOG ) + if [ $? -eq 0 ] ; then + RETVAL=2 + else + MSG_STATUS="All dumps OK" + fi +} + +check_db_freshness +check_dump_status + +MSG="$MSG_FRESH $MSG_STATUS" +echo -n $MSG +exit $RETVAL diff --git a/nagios/files/check_smart b/nagios/files/check_smart new file mode 100755 index 00000000..488eaec8 --- /dev/null +++ b/nagios/files/check_smart @@ -0,0 +1,311 @@ +#!/usr/bin/perl -w +# Check SMART status of ATA/SCSI disks, returning any usable metrics as perfdata. 
+# For usage information, run ./check_smart -h +# +# This script was created under contract for the US Government and is therefore Public Domain +# +# Changes and Modifications +# ========================= +# Feb 3, 2009: Kurt Yoder - initial version of script + +use strict; +use Getopt::Long; + +use File::Basename qw(basename); +my $basename = basename($0); + +my $revision = '$Revision: 1.0 $'; + +use lib '/usr/lib/nagios/plugins/'; +use utils qw(%ERRORS &print_revision &support &usage); + +$ENV{'PATH'}='/bin:/usr/bin:/sbin:/usr/sbin'; +$ENV{'BASH_ENV'}=''; +$ENV{'ENV'}=''; + +use vars qw($opt_d $opt_debug $opt_h $opt_i $opt_v); +Getopt::Long::Configure('bundling'); +GetOptions( + "debug" => \$opt_debug, + "d=s" => \$opt_d, "device=s" => \$opt_d, + "h" => \$opt_h, "help" => \$opt_h, + "i=s" => \$opt_i, "interface=s" => \$opt_i, + "v" => \$opt_v, "version" => \$opt_v, +); + +if ($opt_v) { + print_revision($basename,$revision); + exit $ERRORS{'OK'}; +} + +if ($opt_h) { + print_help(); + exit $ERRORS{'OK'}; +} + +my ($device, $interface) = qw//; +if ($opt_d) { + unless($opt_i){ + print "must specify an interface for $opt_d using -i/--interface!\n\n"; + print_help(); + exit $ERRORS{'UNKNOWN'}; + } + + if (-b $opt_d){ + $device = $opt_d; + } + else{ + print "$opt_d is not a valid block device!\n\n"; + print_help(); + exit $ERRORS{'UNKNOWN'}; + } + + if(grep {$opt_i eq $_} ('ata', 'scsi')){ + $interface = $opt_i; + } + else{ + print "invalid interface $opt_i for $opt_d!\n\n"; + print_help(); + exit $ERRORS{'UNKNOWN'}; + } +} +else{ + print "must specify a device!\n\n"; + print_help(); + exit $ERRORS{'UNKNOWN'}; +} + +my $smart_command = '/usr/bin/sudo /usr/sbin/smartctl'; +my @error_messages = qw//; +my $exit_status = 'OK'; + + +warn "###########################################################\n" if $opt_debug; +warn "(debug) CHECK 1: getting overall SMART health status\n" if $opt_debug; +warn "###########################################################\n\n\n" if 
$opt_debug; + +my $full_command = "$smart_command -d $interface -H $device"; +warn "(debug) executing:\n$full_command\n\n" if $opt_debug; + +my @output = `$full_command`; +warn "(debug) output:\n@output\n\n" if $opt_debug; + +# parse ata output, looking for "health status: passed" +my $found_status = 0; +my $line_str = 'SMART overall-health self-assessment test result: '; # ATA SMART line +my $ok_str = 'PASSED'; # ATA SMART OK string + +if ($interface eq 'scsi'){ + $line_str = 'SMART Health Status: '; # SCSI SMART line + $ok_str = 'OK'; #SCSI SMART OK string +} + +foreach my $line (@output){ + if($line =~ /$line_str(.+)/){ + $found_status = 1; + warn "(debug) parsing line:\n$line\n\n" if $opt_debug; + if ($1 eq $ok_str) { + warn "(debug) found string '$ok_str'; status OK\n\n" if $opt_debug; + } + else { + warn "(debug) no '$ok_str' status; failing\n\n" if $opt_debug; + push(@error_messages, "Health status: $1"); + escalate_status('CRITICAL'); + } + } +} + +unless ($found_status) { + push(@error_messages, 'No health status line found'); + escalate_status('UNKNOWN'); +} + + +warn "###########################################################\n" if $opt_debug; +warn "(debug) CHECK 2: getting silent SMART health check\n" if $opt_debug; +warn "###########################################################\n\n\n" if $opt_debug; + +$full_command = "$smart_command -d $interface -q silent -A $device"; +warn "(debug) executing:\n$full_command\n\n" if $opt_debug; + +system($full_command); +my $return_code = $?; +warn "(debug) exit code:\n$return_code\n\n" if $opt_debug; + +if ($return_code & 0x01) { + push(@error_messages, 'Commandline parse failure'); + escalate_status('UNKNOWN'); +} +if ($return_code & 0x02) { + push(@error_messages, 'Device could not be opened'); + escalate_status('UNKNOWN'); +} +if ($return_code & 0x04) { + push(@error_messages, 'Checksum failure'); + escalate_status('WARNING'); +} +if ($return_code & 0x08) { + push(@error_messages, 'Disk is failing'); + 
escalate_status('CRITICAL'); +} +if ($return_code & 0x10) { + push(@error_messages, 'Disk is in prefail'); + escalate_status('WARNING'); +} +if ($return_code & 0x20) { + push(@error_messages, 'Disk may be close to failure'); + escalate_status('WARNING'); +} +if ($return_code & 0x40) { + push(@error_messages, 'Error log contains errors'); + escalate_status('WARNING'); +} +if ($return_code & 0x80) { + push(@error_messages, 'Self-test log contains errors'); + escalate_status('WARNING'); +} +if ($return_code && !$exit_status) { + push(@error_messages, 'Unknown return code'); + escalate_status('CRITICAL'); +} + +if ($return_code) { + warn "(debug) non-zero exit code, generating error condition\n\n" if $opt_debug; +} +else { + warn "(debug) zero exit code, status OK\n\n" if $opt_debug; +} + + +warn "###########################################################\n" if $opt_debug; +warn "(debug) CHECK 3: getting detailed statistics\n" if $opt_debug; +warn "(debug) information contains a few more potential trouble spots\n" if $opt_debug; +warn "(debug) plus, we can also use the information for perfdata/graphing\n" if $opt_debug; +warn "###########################################################\n\n\n" if $opt_debug; + +$full_command = "$smart_command -d $interface -A $device"; +warn "(debug) executing:\n$full_command\n\n" if $opt_debug; +@output = `$full_command`; +warn "(debug) output:\n@output\n\n" if $opt_debug; +my @perfdata = qw//; + +# separate metric-gathering and output analysis for ATA vs SCSI SMART output +if ($interface eq 'ata'){ + foreach my $line(@output){ + # get lines that look like this: + # 9 Power_On_Minutes 0x0032 241 241 000 Old_age Always - 113h+12m + next unless $line =~ /^\s*\d+\s(\S+)\s+(?:\S+\s+){6}(\S+)\s+(\d+)/; + my ($attribute_name, $when_failed, $raw_value) = ($1, $2, $3); + if ($when_failed ne '-'){ + push(@error_messages, "Attribute $attribute_name failed at $when_failed"); + escalate_status('WARNING'); + warn "(debug) parsed SMART attribute 
$attribute_name with error condition:\n$when_failed\n\n" if $opt_debug; + } + # some attributes produce questionable data; no need to graph them + if (grep {$_ eq $attribute_name} ('Unknown_Attribute', 'Power_On_Minutes') ){ + next; + } + push (@perfdata, "$attribute_name=$raw_value"); + + # do some manual checks + if ( ($attribute_name eq 'Current_Pending_Sector') && $raw_value ) { + push(@error_messages, "Sectors pending re-allocation"); + escalate_status('WARNING'); + warn "(debug) Current_Pending_Sector is non-zero ($raw_value)\n\n" if $opt_debug; + } + } +} +else{ + my ($current_temperature, $max_temperature, $current_start_stop, $max_start_stop) = qw//; + foreach my $line(@output){ + if ($line =~ /Current Drive Temperature:\s+(\d+)/){ + $current_temperature = $1; + } + elsif ($line =~ /Drive Trip Temperature:\s+(\d+)/){ + $max_temperature = $1; + } + elsif ($line =~ /Current start stop count:\s+(\d+)/){ + $current_start_stop = $1; + } + elsif ($line =~ /Recommended maximum start stop count:\s+(\d+)/){ + $max_start_stop = $1; + } + elsif ($line =~ /Elements in grown defect list:\s+(\d+)/){ + push (@perfdata, "defect_list=$1"); + } + elsif ($line =~ /Blocks sent to initiator =\s+(\d+)/){ + push (@perfdata, "sent_blocks=$1"); + } + } + if($current_temperature){ + if($max_temperature){ + push (@perfdata, "temperature=$current_temperature;;$max_temperature"); + if($current_temperature > $max_temperature){ + warn "(debug) Disk temperature is greater than max ($current_temperature > $max_temperature)\n\n" if $opt_debug; + push(@error_messages, 'Disk temperature is higher than maximum'); + escalate_status('CRITICAL'); + } + } + else{ + push (@perfdata, "temperature=$current_temperature"); + } + } + if($current_start_stop){ + if($max_start_stop){ + push (@perfdata, "start_stop=$current_start_stop;$max_start_stop"); + if($current_start_stop > $max_start_stop){ + warn "(debug) Disk start_stop is greater than max ($current_start_stop > $max_start_stop)\n\n" if 
$opt_debug; + push(@error_messages, 'Disk start_stop is higher than maximum'); + escalate_status('WARNING'); + } + } + else{ + push (@perfdata, "start_stop=$current_start_stop"); + } + } +} +warn "(debug) gathered perfdata:\n@perfdata\n\n" if $opt_debug; +my $perf_string = join(' ', @perfdata); + +warn "###########################################################\n" if $opt_debug; +warn "(debug) FINAL STATUS: $exit_status\n" if $opt_debug; +warn "###########################################################\n\n\n" if $opt_debug; + +warn "(debug) final status/output:\n" if $opt_debug; + +my $status_string = ''; + +if($exit_status ne 'OK'){ + $status_string = "$exit_status: ".join(', ', @error_messages); +} +else { + $status_string = "OK: no SMART errors detected"; +} + +print "$status_string|$perf_string\n"; +exit $ERRORS{$exit_status}; + +sub print_help { + print_revision($basename,$revision); + print "Usage: $basename (--device= --interface=(ata|scsi)|-h|-v) [--debug]\n"; + print " --debug: show debugging information\n"; + print " -d/--device: a device to be SMART monitored, eg /dev/sda\n"; + print " -i/--interface: ata or scsi, depending upon the device's interface type\n"; + print " -h/--help: this help\n"; + print " -v/--version: Version number\n"; + support(); +} + +# escalate an exit status IFF it's more severe than the previous exit status +sub escalate_status { + my $requested_status = shift; + # no test for 'CRITICAL'; automatically escalates upwards + if ($requested_status eq 'WARNING') { + return if $exit_status eq 'CRITICAL'; + } + if ($requested_status eq 'UNKNOWN') { + return if $exit_status eq 'WARNING'; + return if $exit_status eq 'CRITICAL'; + } + $exit_status = $requested_status; +} diff --git a/nagios/files/check_system_pp b/nagios/files/check_system_pp new file mode 100644 index 00000000..19a3e5f9 --- /dev/null +++ b/nagios/files/check_system_pp @@ -0,0 +1,162 @@ +#!/bin/bash +# +# 
https://www.monitoringexchange.org/inventory/Check-Plugins/Operating-Systems/Linux/Check-Processes-and-Ports +# Usage: .//check_system_pp +# +# Description: +# This plugin determines whether the server +# is running properly. It will check the following: +# * Are all required processes running? +# * Are all the required TCP/IP ports open? +# +# Created: 27.01.2006 (FBA) +# +# Changes: 28.01.2006 added yellow check (FBA) +# 29.01.2006 change "px -ef" to "ps -ax" (FBA). Problems with long arguments +# 31.01.2006 added all OK Status with all procs and ports (FBA) +# 15.07.2006 change "ps -ax" to "ps ax" (FBA). Also problems with long arguments under RedHat 3/4 +# 17.07.2006 Plugin rewrite and bugfixes (Magnus Glantz) +# 19.07.2006 Removed utils.sh dependency. +# +# +# + +COMMON_SH_LIB=/usr/lib/nagios/plugins/isti-cnr/check_library.sh +if [ -f $COMMON_SH_LIB ] ; then + . $COMMON_SH_LIB +else + PLUGIN_DIR=/usr/lib/nagios/plugins + ISTI_PLUGDIR=$PLUGIN_DIR/isti-cnr +fi + +# We want the list of processes and ports to be customizable without editing this script +PP_CONF=$ISTI_PLUGDIR/check_system_pp.conf +if [ -f $PP_CONF ] ; then + . $PP_CONF +else +################################################################################## +# +# Processes to check + PROCLIST_RED="sshd" + PROCLIST_YELLOW="syslogd cron" + +# Ports to check + PORTLIST="22" + +################################################################################## +fi + +PATH="/usr/bin:/usr/sbin:/bin:/sbin" + +STATE_OK=0 +STATE_WARNING=1 +STATE_CRITICAL=2 +STATE_UNKNOWN=3 +STATE_DEPENDENT=4 + +print_gpl() { + echo "This program is free software; you can redistribute it and/or modify" + echo "it under the terms of the GNU General Public License as published by" + echo "the Free Software Foundation; either version 2 of the License, or" + echo "(at your option) any later version." 
+ echo "" + echo "This program is distributed in the hope that it will be useful," + echo "but WITHOUT ANY WARRANTY; without even the implied warranty of" + echo "MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the" + echo "GNU General Public License for more details." + echo "" + echo "You should have received a copy of the GNU General Public License" + echo "along with this program; if not, write to the Free Software" + echo "Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA" +} + +print_help(){ + echo "" + echo "System process and port check script for Nagios." + echo "" + echo "Usage: ./check_system_pp" + echo "Website: http://www.nagiosexchange.org" + echo "https://www.monitoringexchange.org/inventory/Check-Plugins/Operating-Systems/Linux/Check-Processes-and-Ports" + echo "" + print_gpl +} + +while test -n "$1" +do + case "$1" in + *) print_help; exit $STATE_OK;; + esac +done + +check_processes_red() +{ + PROCESS="0" + ERROR_PROCS="" + for PROC in `echo $PROCLIST_RED`; do + if [ `ps -ef | grep -w $PROC | grep -v grep | wc -l` -lt 1 ]; then + PROCESS=1 + ERROR_PROCS="$ERROR_PROCS""$PROC "; + fi + done + + if [ $PROCESS -eq "1" ]; then + exit_red=$STATE_CRITICAL + elif [ $PROCESS -eq "0" ]; then + exit_red=$STATE_OK + fi +} + +check_processes_yellow() +{ + PROCESS="0" + WARNING_PROCS="" + for PROC in `echo $PROCLIST_YELLOW`; do + if [ `ps -ef | grep $PROC | grep -v grep | wc -l` -lt 1 ]; then + PROCESS=1 + WARNING_PROCS="$WARNING_PROCS""$PROC "; + fi + done + + if [ $PROCESS -eq "1" ]; then + exit_yellow=$STATE_WARNING + elif [ $PROCESS -eq "0" ]; then + exit_yellow=$STATE_OK + fi +} + +check_ports() +{ + PORTS="0" + ERROR_PORTS="" + for NUM in `echo $PORTLIST`; do + if [ `netstat -an | grep LISTEN | grep -w $NUM | grep -v grep | wc -l` -lt 1 ]; then + PORTS=1 + ERROR_PORTS="$ERROR_PORTS""$NUM "; + fi + done + + if [ $PORTS -eq "1" ]; then + exit_ports=$STATE_CRITICAL + elif [ $PORTS -eq "0" ]; then + exit_ports=$STATE_OK + fi +} 
+ +check_processes_red +check_ports +check_processes_yellow + +final_exit=`expr $exit_ports + $exit_red + $exit_yellow` + +if [ $final_exit -eq "0" ]; then + echo "SYSTEM OK - All monitored resources OK. Processes: $PROCLIST_RED $PROCLIST_YELLOW. Ports: $PORTLIST." + exitstatus=$STATE_OK +elif [ $final_exit -eq "1" ]; then + echo "SYSTEM WARNING - Processes DOWN. ($WARNING_PROCS)." + exitstatus=$STATE_WARNING +elif [ $final_exit -ge "1" ]; then + echo "SYSTEM CRITICAL - Resources DOWN! Processes: $ERROR_PROCS $WARNING_PROCS. Ports: $ERROR_PORTS" + exitstatus=$STATE_CRITICAL +fi + +exit $exitstatus diff --git a/nagios/files/cleanup-leaked-ipvsems.sh b/nagios/files/cleanup-leaked-ipvsems.sh new file mode 100644 index 00000000..6e24aee7 --- /dev/null +++ b/nagios/files/cleanup-leaked-ipvsems.sh @@ -0,0 +1,27 @@ +#!/bin/bash + + +# semaphore leak su debian 6 col kernel backports. Il problema è del check nagios per l'hardware che usa le utility Dell. +# Workaround: individuare ed eliminare i semafori inutilizzati ( http://serverfault.com/questions/352026/anyone-know-how-to-fix-issues-with-omsa-on-red-hat-5-1-that-reports-no-controll ): + +# "One common non-obvious cause of this problem is system semaphore exhaustion. Check your system logs; if you see something like this: + +# Server Administrator (Shared Library): Data Engine EventID: 0 A semaphore set has to be created but the system limit for the maximum number of semaphore sets has been exceeded + +# then you're running out of semaphores. + +# You can run ipcs -s to list all of the semaphores currently allocated on your system and then use ipcrm -s to remove a semaphore (if you're reasonably sure it's no longer needed). You might also want to track down the program that created them (using information from ipcs -s -i ) to make sure it's not leaking semaphores. In my experience, though, most leaks come from programs that were interrupted (by segfaults or similar) before they could run their cleanup code. 
+
+# If your system really needs all of the semaphores currently allocated, you can increase the number of semaphores available. Run sysctl -a | grep kernel.sem to see what the current settings are. The final number is the number of semaphores available on the system (normally 128). Copy that line into /etc/sysctl.conf, change the final number to a larger value, save it, and run sysctl -p to load the new settings."
+
+# Walk the semaphore sets owned by nagios and drop every set whose
+# creator process is no longer running.
+for id in $( ipcs -s | grep nagios | awk '{print $2}' ) ; do
+	# Pid recorded for the semaphore set, taken from ipcs -s -i output.
+	SEM_ID_PROC=$( ipcs -s -i $id | grep -A1 pid | grep -v pid | awk '{print $5}')
+	# NOTE(review): if SEM_ID_PROC comes back empty the pattern degenerates
+	# to "  " and may match unrelated ps lines, keeping a leaked set alive —
+	# verify on a live system.
+	ps auwwx | grep " $SEM_ID_PROC " | grep -v grep >/dev/null 2>&1
+	RETVAL=$?
+	# grep exit status 1 == no such process found: the set leaked, remove it.
+	if [ $RETVAL -eq 1 ] ; then
+#		ipcs -s -i $id
+		ipcrm -s $id > /dev/null 2>&1
+	fi
+done
+
+exit 0
diff --git a/nagios/files/hardy-iotop-ppa b/nagios/files/hardy-iotop-ppa
new file mode 100644
index 00000000..1e2c8161
--- /dev/null
+++ b/nagios/files/hardy-iotop-ppa
@@ -0,0 +1 @@
+deb http://ppa.launchpad.net/tormodvolden/ubuntu hardy main
diff --git a/nagios/files/linux.dell.com.sources.list b/nagios/files/linux.dell.com.sources.list
new file mode 100644
index 00000000..ea69e38d
--- /dev/null
+++ b/nagios/files/linux.dell.com.sources.list
@@ -0,0 +1 @@
+deb http://linux.dell.com/repo/community/deb/latest /
diff --git a/nagios/files/research-infrastructures.eu.system.list b/nagios/files/research-infrastructures.eu.system.list
new file mode 100644
index 00000000..37540b61
--- /dev/null
+++ b/nagios/files/research-infrastructures.eu.system.list
@@ -0,0 +1,2 @@
+deb http://ppa.research-infrastructures.eu/system stable main
+
diff --git a/nagios/files/show_users b/nagios/files/show_users
new file mode 100755
index 00000000..e9a94d18
--- /dev/null
+++ b/nagios/files/show_users
@@ -0,0 +1,242 @@
+#!/bin/bash
+#
+# Copyright Hari Sekhon 2007
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of
the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA +# + +# Nagios Plugin to list all currently logged on users to a system. + +# Modified by Rob MacKenzie, SFU - rmackenz@sfu.ca +# Added the -w and -c options to check for number of users. + + +version=0.3 + +# This makes coding much safer as a varible typo is caught +# with an error rather than passing through +set -u + +# Note: resisted urge to use <<<, instead sticking with | +# in case anyone uses this with an older version of bash +# so no bash bashers please on this + +# Standard Nagios exit codes +OK=0 +WARNING=1 +CRITICAL=2 +UNKNOWN=3 + +usage(){ + echo "usage: ${0##*/} [--simple] [ --mandatory username ] [ --unauthorized username ] [ --whitelist username ]" + echo + echo "returns a list of users on the local machine" + echo + echo " -s, --simple show users without the number of sessions" + echo " -m username, --mandatory username" + echo " Mandatory users. Return CRITICAL if any of these users are not" + echo " currently logged in" + echo " -b username, --blacklist username" + echo " Unauthorized users. Returns CRITICAL if any of these users are" + echo " logged in. This can be useful if you have a policy that states" + echo " that you may not have a root shell but must instead only use " + echo " 'sudo command'. Specifying '-u root' would alert on root having" + echo " a session and hence catch people violating such a policy." + echo " -a username, --whitelist username" + echo " Whitelist users. This is exceptionally useful. 
If you define" + echo " a bunch of users here that you know you use, and suddenly" + echo " there is a user session open for another account it could" + echo " alert you to a compromise. If you run this check say every" + echo " 3 minutes, then any attacker has very little time to evade" + echo " detection before this trips." + echo + echo " -m,-u and -w can be specified multiple times for multiple users" + echo " or you can use a switch a single time with a comma separated" + echo " list." + echo " -w integer, --warning integer" + echo " Set WARNING status if more than INTEGER users are logged in" + echo " -c integer, --critical integer" + echo " Set CRITICAL status if more than INTEGER users are logged in" + echo + echo + echo " -V --version Print the version number and exit" + echo + exit $UNKNOWN +} + +simple="" +mandatory_users="" +unauthorized_users="" +whitelist_users="" +warning_users=0 +critical_users=0 + +while [ "$#" -ge 1 ]; do + case "$1" in +-h|--help) usage + ;; +-V|--version) echo $version + exit $UNKNOWN + ;; +-s|--simple) simple=true + ;; +-m|--mandatory) if [ "$#" -ge 2 ]; then + if [ -n "$mandatory_users" ]; then + mandatory_users="$mandatory_users $2" + else + mandatory_users="$2" + fi + shift + else + usage + fi + ;; +-b|--blacklist) if [ "$#" -ge 2 ]; then + if [ -n "$unauthorized_users" ]; then + unauthorized_users="$unauthorized_users $2" + else + unauthorized_users="$2" + fi + shift + else + usage + fi + ;; +-a|--whitelist) if [ "$#" -ge 2 ]; then + if [ -n "$whitelist_users" ]; then + whitelist_users="$whitelist_users $2" + else + whitelist_users="$2" + fi + shift + else + usage + fi + ;; +-w|--warning) if [ "$#" -ge 2 ]; then + if [ $2 -ge 1 ]; then + warning_users=$2 + fi + shift + else + usage + fi + ;; +-c|--critical) if [ "$#" -ge 2 ]; then + if [ $2 -ge 1 ]; then + critical_users=$2 + fi + shift + else + usage + fi + ;; + *) usage + ;; + esac + shift +done + +mandatory_users="`echo $mandatory_users | tr ',' ' '`" 
+unauthorized_users="`echo $unauthorized_users | tr ',' ' '`" +whitelist_users="`echo $whitelist_users | tr ',' ' '`" + +# Must be a list of usernames only. +userlist="`who|grep -v "^ *$"|awk '{print $1}'|sort`" +usercount="`who|wc -l`" + +errormsg="" +exitcode=$OK + +if [ -n "$userlist" ]; then + if [ -n "$mandatory_users" ]; then + missing_users="" + for user in $mandatory_users; do + if ! echo "$userlist"|grep "^$user$" >/dev/null 2>&1; then + missing_users="$missing_users $user" + exitcode=$CRITICAL + fi + done + for user in `echo $missing_users|tr " " "\n"|sort -u`; do + errormsg="${errormsg}user '$user' not logged in. " + done + fi + + if [ -n "$unauthorized_users" ]; then + blacklisted_users="" + for user in $unauthorized_users; do + if echo "$userlist"|sort -u|grep "^$user$" >/dev/null 2>&1; then + blacklisted_users="$blacklisted_users $user" + exitcode=$CRITICAL + fi + done + for user in `echo $blacklisted_users|tr " " "\n"|sort -u`; do + errormsg="${errormsg}Unauthorized user '$user' is logged in! " + done + fi + + if [ -n "$whitelist_users" ]; then + unwanted_users="" + for user in `echo "$userlist"|sort -u`; do + if ! echo $whitelist_users|tr " " "\n"|grep "^$user$" >/dev/null 2>&1; then + unwanted_users="$unwanted_users $user" + exitcode=$CRITICAL + fi + done + for user in `echo $unwanted_users|tr " " "\n"|sort -u`; do + errormsg="${errormsg}Unauthorized user '$user' detected! 
" + done + fi + + if [ $warning_users -ne 0 -o $critical_users -ne 0 ]; then + unwanted_users=`who` + if [ $usercount -ge $critical_users -a $critical_users -ne 0 ]; then + exitcode=$CRITICAL + elif [ $usercount -ge $warning_users -a $warning_users -ne 0 ]; then + exitcode=$WARNING + fi + OLDIFS="$IFS" + IFS=$'\n' + for user in $unwanted_users; do + errormsg="${errormsg} --- $user" + done + IFS="$OLDIFS" + fi + + if [ "$simple" == "true" ] + then + finallist=`echo "$userlist"|uniq` + else + finallist=`echo "$userlist"|uniq -c|awk '{print $2"("$1")"}'` + fi +else + finallist="no users logged in" +fi + +if [ "$exitcode" -eq $OK ]; then + echo "USERS OK:" $finallist + exit $OK +elif [ "$exitcode" -eq $WARNING ]; then + echo "USERS WARNING: [users: "$finallist"]" $errormsg + exit $WARNING +elif [ "$exitcode" -eq $CRITICAL ]; then + echo "USERS CRITICAL: [users: "$finallist"]" $errormsg + exit $CRITICAL +else + echo "USERS UNKNOWN:" $errormsg"[users: "$finallist"]" + exit $UNKNOWN +fi + +exit $UNKNOWN diff --git a/nagios/handlers/main.yml b/nagios/handlers/main.yml new file mode 100644 index 00000000..a856b575 --- /dev/null +++ b/nagios/handlers/main.yml @@ -0,0 +1,19 @@ +- name: Reload NRPE server + service: name=nagios-nrpe-server state=reloaded + +- name: Restart NRPE server + service: name=nagios-nrpe-server state=restarted + +- name: Restart Nagios server + service: name=nagios3 state=restarted + +- name: Reload Nagios server + service: name=nagios3 state=reloaded + +- name: Update apt cache + apt: update_cache=yes + ignore_errors: True + +- name: Start Dell OMSA + service: name=dataeng state=restarted enabled=yes + ignore_errors: True diff --git a/nagios/tasks/dell-omsa.yml b/nagios/tasks/dell-omsa.yml new file mode 100644 index 00000000..c9be65e3 --- /dev/null +++ b/nagios/tasks/dell-omsa.yml @@ -0,0 +1,144 @@ +--- +# The internal repository is used for the check-openmanage nagios plugin: +# http://folk.uio.no/trondham/software/check_openmanage.html +- name: 
research infrastructures system repository on ubuntu + apt_repository: repo='{{ item }}' + with_items: + - deb http://ppa.research-infrastructures.eu/system stable main + when: is_ubuntu + register: update_apt_cache + tags: + - dell + - nagios + notify: Update apt cache + +- name: research infrastructures system repository on debian + copy: src={{ item }} dest=/etc/apt/sources.list.d/{{ item }} + with_items: + - research-infrastructures.eu.system.list + when: is_debian6 + register: update_apt_cache + tags: + - dell + - nagios + +- name: Install the Dell apt repository + template: src={{ item }}.j2 dest=/etc/apt/sources.list.d/{{ item }} + with_items: + - linux.dell.com.sources.list + when: is_not_debian6 + register: update_apt_cache + tags: + - dell + - nagios + +- action: apt_key url=http://ppa.research-infrastructures.eu/system/keys/system-archive.asc state=present + tags: + - dell + - nagios + ignore_errors: True + +#- action: apt_key id=1285491434D8786F state=present +- shell: gpg --keyserver pool.sks-keyservers.net --recv-key 1285491434D8786F ; gpg -a --export 1285491434D8786F | apt-key add - + tags: + - dell + - nagios + +- name: Update apt cache + apt: update_cache=yes + when: update_apt_cache.changed + tags: + - dell + - nagios + +- name: Install the Dell OMSA packages dependencies + apt: pkg={{ item }} state=installed + with_items: + - libnet-snmp-perl + - libconfig-tiny-perl + - ipmitool + - check-openmanage + tags: + - dell + - nagios + +- name: Install the Dell OMSA packages dependencies + apt: pkg={{ item }} state=installed + with_items: + - python-requests + register: requests_pkg + ignore_errors: True + tags: + - dell + - nagios + +- name: Install the python-pip package if requests is not available as a package + apt: pkg={{ item }} state=installed + with_items: + - python-pip + when: requests_pkg|failed + tags: + - dell + - nagios + +- name: Install the python-requests package via pip if it s not available as package + pip: name={{ item }} 
state=latest use_mirrors=no + with_items: + - requests + when: requests_pkg|failed + tags: + - dell + - nagios + +- name: Get the old libssl0.9.8_9.9.8 needed by del Dell OMSA utilities on debian 7 + get_url: url=http://ppa.research-infrastructures.eu/dell-legacy/libssl0.9.8_0.9.8o-4squeeze14_amd64.deb dest=/var/lib/libssl0.9.8_0.9.8o-4squeeze14_amd64.deb + when: is_debian7 + register: libssl_legacy + tags: + - dell + - nagios + +- name: Install libssl0.9.8_0.9.8o-4squeeze14_amd64.deb on debian 7 + shell: /usr/bin/dpkg -i /var/lib/libssl0.9.8_0.9.8o-4squeeze14_amd64.deb + when: libssl_legacy.changed + tags: + - dell + - nagios + +- name: Install the Dell OMSA packages + apt: pkg={{ item }} state=installed force=yes + with_items: + - syscfg + when: is_not_debian6 + tags: + - dell + - nagios + +- name: Install the Dell OMSA packages + apt: pkg={{ item }} state=installed force=yes + with_items: + - srvadmin-base + - srvadmin-idrac + - srvadmin-storageservices + notify: + Start Dell OMSA + tags: + - dell + - nagios + +- name: Install the check_warranty plugin for dell systems + copy: src={{ item }} dest={{ nagios_isti_plugdir }}/{{ item }} owner=root group=nagios mode=0750 + with_items: + - check_dell_warranty.py + tags: + - dell + - nagios + +- name: Install a cron job that removes the leaked semaphores created by the nagios check of Dell hardware status + copy: src={{ item }} dest=/etc/cron.daily/{{ item }} owner=root group=root mode=0555 + with_items: + - cleanup-leaked-ipvsems.sh + tags: + - dell + - nagios + diff --git a/nagios/tasks/hardware-checks.yml b/nagios/tasks/hardware-checks.yml new file mode 100644 index 00000000..26d921fc --- /dev/null +++ b/nagios/tasks/hardware-checks.yml @@ -0,0 +1,45 @@ +--- +# The original check_linux_raid is often buggy +- name: Install some plugins that check hardware parts + copy: src={{ item }} dest={{ nagios_isti_plugdir }}/{{ item }} owner=root group=nagios mode=0750 + with_items: + - check_linux_raid + - check_smart + tags: 
+ - nagios-hw + - nagios + +- name: Install some packages needed by the hardware checks + apt: pkg={{ item }} state=installed + with_items: + - smartmontools + tags: + - nagios-hw + - nagios + +- name: Configure the smart server to run + lineinfile: name=/etc/default/smartmontools regexp="^start_smartd=" line="start_smartd=yes" + tags: + - nagios-hw + - nagios + +- name: Ensure that the smart server is enabled and running + service: name=smartmontools state=started enabled=yes + tags: + - nagios-hw + - nagios + +- name: Configure NRPE to allow arguments. Needed by the check_smart plugin + lineinfile: name=/etc/nagios/nrpe.cfg regexp="^dont_blame_nrpe=" line="dont_blame_nrpe=0" + notify: Restart NRPE server + tags: + - nagios-hw + - nagios + - nrpe + +- name: nagios needs root to execute some hardware checks. We do it via sudo + template: src=nagios-hw.sudoers.j2 dest=/etc/sudoers.d/nagios-hw owner=root group=root mode=0440 + tags: + - nagios-hw + - nagios + - nrpe diff --git a/nagios/tasks/main.yml b/nagios/tasks/main.yml new file mode 100644 index 00000000..7b22eff5 --- /dev/null +++ b/nagios/tasks/main.yml @@ -0,0 +1,9 @@ +--- +- include: nagios.yml +- include: dell-omsa.yml + when: dell_system is defined +- include: postgresql-nagios.yml + when: nagios_postgresql_check is defined and nagios_postgresql_check +#- include: nsca.yml +- include: hardware-checks.yml + when: nagios_hw is defined and nagios_hw diff --git a/nagios/tasks/nagios.yml b/nagios/tasks/nagios.yml new file mode 100644 index 00000000..5fa60bf0 --- /dev/null +++ b/nagios/tasks/nagios.yml @@ -0,0 +1,71 @@ +--- +- name: Install the nagios packages + apt: pkg={{ item }} state=installed + with_items: + - nagios-plugins + - nagios-plugins-basic + - nagios-plugins-standard + - nagios-nrpe-server + - sudo + when: is_not_debian_less_than_6 + tags: + - nagios + +- name: debian 6 has other nagios plugins + apt: pkg={{ item }} state=installed + with_items: + - nagios-plugins-contrib + when: is_debian6 + 
tags: + - nagios + +- name: Create the directory where our local plugins are installed + file: path={{ nagios_isti_plugdir }} state=directory + tags: + - nagios + +- name: Install the generic shell library used by some custom checks + template: src=check_library.sh.j2 dest={{ nagios_isti_plugdir }}/check_library.sh owner=root group=root mode=0644 + tags: + - nagios + +- name: Install plugins that can be useful on all the installed servers + copy: src={{ item }} dest={{ nagios_isti_plugdir }}/{{ item }} owner=root group=nagios mode=0755 + with_items: + - check_system_pp + - show_users + - check_netint.pl + tags: + - nagios + +- name: Install the global nrpe commands file + template: src=common-nrpe.cfg.j2 dest=/etc/nagios/nrpe.d/common.cfg owner=root group=root mode=444 + notify: + - Reload NRPE server + tags: + - nrpe + - nagios + +- name: set the NRPE ACL + action: | + lineinfile name=/etc/nagios/nrpe.cfg regexp="allowed_hosts=" line="allowed_hosts=127.0.0.1,{{ nagios_monitoring_server_ip }}" + notify: + - Reload NRPE server + tags: + - nagios + - nrpe + +- name: set the NRPE default timeout + lineinfile: name=/etc/nagios/nrpe.cfg regexp="command_timeout=" line="command_timeout={{ nrpe_command_timeout }}" + notify: + - Reload NRPE server + tags: + - nagios + - nrpe + +- name: nagios needs root to execute some commands. 
We do it via sudo + template: src=nagios.sudoers.j2 dest=/etc/sudoers.d/nagios owner=root group=root mode=0440 + tags: + - nagios + - nrpe + diff --git a/nagios/tasks/nsca.yml b/nagios/tasks/nsca.yml new file mode 100644 index 00000000..626ef2e0 --- /dev/null +++ b/nagios/tasks/nsca.yml @@ -0,0 +1,19 @@ +--- +- name: Configure the nsca client + template: src=send_nsca.j2 dest=/etc/send_nsca.cfg owner=root group=root mode=400 + tags: + - nsca + +- apt: pkg={{ item }} state=installed + with_items: + - nsca-client + when: '(({{ is_not_ubuntu_less_than_precise }}) == True) or (({{ is_debian7 }}) == True)' + tags: + - nsca + +- apt: pkg={{ item }} state=installed + with_items: + - nsca + when: "(({{ is_not_debian_less_than_6 }}) == True) and (({{ is_ubuntu_less_than_precise }}) == True)" + tags: + - nsca diff --git a/nagios/tasks/postgresql-nagios.yml b/nagios/tasks/postgresql-nagios.yml new file mode 100644 index 00000000..da995ab8 --- /dev/null +++ b/nagios/tasks/postgresql-nagios.yml @@ -0,0 +1,35 @@ +--- +- name: Ensure that the isti local nagios plugins directory exists + file: dest={{ nagios_isti_plugdir }} owner=root group=root state=directory + tags: + - nrpe + - nagios + +- name: Install the postgresql backup nagios check + copy: src=check_postgresql_backup dest={{ nagios_isti_plugdir }}/check_postgresql_backup owner=root group=root mode=0555 + tags: + - nrpe + - nagios + +- name: Install the packages needed to check postgres via nagios + apt: pkg={{ item }} state=installed + with_items: + - check-postgres + tags: + - nrpe + - nagios + +- name: Install the sudoers file needed by some nagios checks + template: src=postgresql-sudoers.j2 dest=/etc/sudoers.d/postgresql owner=root group=root mode=440 + tags: + - nrpe + - nagios + +- name: Install the nrpe configuration for check_postgres + template: src=postgresql-nrpe.cfg.j2 dest=/etc/nagios/nrpe.d/postgresql-nrpe.cfg owner=root group=root mode=444 + notify: + - Reload NRPE server + tags: + - nrpe + - nagios + diff 
--git a/nagios/templates/check_library.sh.j2 b/nagios/templates/check_library.sh.j2 new file mode 100644 index 00000000..3072b50c --- /dev/null +++ b/nagios/templates/check_library.sh.j2 @@ -0,0 +1,10 @@ + +ISTI_PLUGDIR={{ nagios_isti_plugdir }} +COMMON_LIB={{ nagios_common_lib }} + +if [ -d {{ nagios_plugins_dir }} ] ; then + PLUGIN_DIR={{ nagios_plugins_dir }} +elif [ -d {{ nagios_centos_plugins_dir }} ] ; then + PLUGIN_DIR={{ nagios_centos_plugins_dir }} +fi + diff --git a/nagios/templates/common-nrpe.cfg.j2 b/nagios/templates/common-nrpe.cfg.j2 new file mode 100644 index 00000000..839d3b42 --- /dev/null +++ b/nagios/templates/common-nrpe.cfg.j2 @@ -0,0 +1,40 @@ +# Debian 4 doesn't support "-A -i options" +command[global_check_disk]={{ nagios_plugins_dir }}/check_disk -w {{ nagios_check_disk_w }}% -c {{ nagios_check_disk_c }}% -X tmpfs -X proc -X sysfs -X devpts -X dev -A -i /mnt/.* +#command[global_check_disk]={{ nagios_plugins_dir }}/check_disk -w {{ nagios_check_disk_w }}% -c {{ nagios_check_disk_c }}% -X tmpfs -X proc -X sysfs -X devpts + +command[global_check_load]={{ nagios_plugins_dir }}/check_load -w 20,15,10 -c 35,30,25 +command[global_check_zombie_procs]={{ nagios_plugins_dir }}/check_procs -w 5 -c 10 -s Z +command[global_check_total_procs]={{ nagios_plugins_dir }}/check_procs -w 800 -c 1000 + +# Ganglia gmond server +command[global_check_gmond]={{ nagios_plugins_dir }}/check_procs -w 1:1 -c 1:1 -C gmond + +# Munin node +command[global_check_munin]={{ nagios_plugins_dir }}/check_procs -w 1:1 -c 1:1 -C munin-node + +# Show number and username of the logged users +command[global_show_users]={{ nagios_isti_plugdir }}/show_users -a {{ nagios_allowed_users }} + +# Generic script that monitors the existance of a given processes list +command[global_check_system_pp]={{ nagios_isti_plugdir }}/check_system_pp + +# Linux RAID check +command[global_check_linux_raid]={{ nagios_isti_plugdir }}/check_linux_raid + +# Disks S.M.A.R.T. 
check +command[global_check_smart]={{ nagios_isti_plugdir }}/check_smart -d $ARG1$ -i $ARG2$ + +# Network interfaces +command[global_net_interfaces]={{ nagios_isti_plugdir }}/check_netint.pl -K -f -e + +# Restart ntp (via handler) +command[global_restart_ntp]=/usr/bin/sudo /etc/init.d/ntp start + +# Restart gmond (via handler) +command[global_restart_gmond]=/usr/bin/sudo /etc/init.d/ganglia-monitor start + +# Restart munin node (via handler) +command[global_restart_munin]=/usr/bin/sudo /etc/init.d/munin-node start + + + diff --git a/nagios/templates/linux.dell.com.sources.list.j2 b/nagios/templates/linux.dell.com.sources.list.j2 new file mode 100644 index 00000000..53833262 --- /dev/null +++ b/nagios/templates/linux.dell.com.sources.list.j2 @@ -0,0 +1 @@ +deb http://linux.dell.com/repo/community/ubuntu {{ ansible_distribution_version }} openmanage diff --git a/nagios/templates/nagios-hw.sudoers.j2 b/nagios/templates/nagios-hw.sudoers.j2 new file mode 100644 index 00000000..dd3e9e6a --- /dev/null +++ b/nagios/templates/nagios-hw.sudoers.j2 @@ -0,0 +1 @@ +nagios ALL=(root) NOPASSWD: /usr/sbin/smartctl diff --git a/nagios/templates/nagios.sudoers.j2 b/nagios/templates/nagios.sudoers.j2 new file mode 100644 index 00000000..65f8e8a2 --- /dev/null +++ b/nagios/templates/nagios.sudoers.j2 @@ -0,0 +1 @@ +nagios ALL=(ALL) NOPASSWD: {{ nagios_plugins_dir }}/, {{ nagios_isti_plugdir }}/, {{ nagios_centos_plugins_dir }}/, /etc/init.d/, /usr/sbin/service, /sbin/service diff --git a/nagios/templates/nrpe_local.cfg.j2 b/nagios/templates/nrpe_local.cfg.j2 new file mode 100644 index 00000000..f6fcd804 --- /dev/null +++ b/nagios/templates/nrpe_local.cfg.j2 @@ -0,0 +1 @@ +include_dir={{ nrpe_include_dir }} diff --git a/nagios/templates/postgresql-nrpe.cfg.j2 b/nagios/templates/postgresql-nrpe.cfg.j2 new file mode 100644 index 00000000..e10adfee --- /dev/null +++ b/nagios/templates/postgresql-nrpe.cfg.j2 @@ -0,0 +1,20 @@ +# Check the status of the postgresql local dumps +command[{{ 
monitoring_group_name }}_check_postgresql_backup]={{ nagios_isti_plugdir }}/check_postgresql_backup + +command[{{ monitoring_group_name }}_check_postgresql_connection]=/usr/bin/sudo -u postgres /usr/bin/check_postgres_connection -db template1 + +command[{{ monitoring_group_name }}_check_postgresql_timesync]=/usr/bin/sudo -u postgres /usr/bin/check_postgres_timesync -db template1 + +command[{{ monitoring_group_name }}_check_postgresql_backends]=/usr/bin/sudo -u postgres /usr/bin/check_postgres_backends -db template1 + +command[{{ monitoring_group_name }}_check_postgresql_commitratio]=/usr/bin/sudo -u postgres /usr/bin/check_postgres_commitratio -db template1 + +command[{{ monitoring_group_name }}_check_postgresql_database_size]=/usr/bin/sudo -u postgres /usr/bin/check_postgres_database_size -db template1 -w {{ nagios_psql_db_size_w }} -c {{ nagios_psql_db_size_c }} + +{% for db in psql_db_data %} +command[{{ monitoring_group_name }}_check_postgresql_{{ db.name }}_query]=/usr/bin/sudo -u postgres /usr/bin/check_postgres_query_time -db {{ db.name }} -w {{ nagios_psql_query_time_w }} -c {{ nagios_psql_query_time_c }} + +command[{{ monitoring_group_name }}_check_postgresql_{{ db.name }}_dbstats]=/usr/bin/sudo -u postgres /usr/bin/check_postgres_dbstats -db {{ db.name }} + +command[{{ monitoring_group_name }}_check_postgresql_{{ db.name }}_sequence]=/usr/bin/sudo -u postgres /usr/bin/check_postgres_sequence -db {{ db.name }} +{% endfor %} diff --git a/nagios/templates/postgresql-sudoers.j2 b/nagios/templates/postgresql-sudoers.j2 new file mode 100644 index 00000000..d4325d5a --- /dev/null +++ b/nagios/templates/postgresql-sudoers.j2 @@ -0,0 +1 @@ +nagios ALL=(postgres) NOPASSWD: /usr/bin/check_postgres_* diff --git a/nagios/templates/send_nsca.j2 b/nagios/templates/send_nsca.j2 new file mode 100644 index 00000000..aa2308c6 --- /dev/null +++ b/nagios/templates/send_nsca.j2 @@ -0,0 +1,2 @@ +password={{ nsca_password }} +decryption_method={{ nsca_encryption }} diff --git 
a/nemis-precise-backports-repo/tasks/main.yml b/nemis-precise-backports-repo/tasks/main.yml new file mode 100644 index 00000000..932c3c8f --- /dev/null +++ b/nemis-precise-backports-repo/tasks/main.yml @@ -0,0 +1,24 @@ +--- +- name: apt key for the internal precise-backports repository + apt_key: url=http://ppa.research-infrastructures.eu/precise-backports/keys/precise-backports.asc state=present + when: is_precise + register: update_apt_cache + tags: + - apt + +- name: Install the precise backports apt repository + apt_repository: repo='{{ item }}' + with_items: + - deb http://ppa.research-infrastructures.eu/precise-backports precise main + when: is_precise + register: update_apt_cache + tags: + - apt + +- name: Update the apt cache + apt: update_cache=yes + when: ( update_apt_cache | changed ) + ignore_errors: True + tags: + - apt + diff --git a/nginx/defaults/main.yml b/nginx/defaults/main.yml new file mode 100644 index 00000000..afc7fbf9 --- /dev/null +++ b/nginx/defaults/main.yml @@ -0,0 +1,6 @@ +--- +nginx_use_ldap_pam_auth: False +nginx_pam_svc_name: nginx +nginx_ldap_uri: "ldap://ldap.sub.research-infrastructures.eu" +nginx_ldap_base_dn: "dc=research-infrastructures,dc=eu" +nginx_enabled: "Yes" diff --git a/nginx/files/nginx.pam b/nginx/files/nginx.pam new file mode 100644 index 00000000..f94005a8 --- /dev/null +++ b/nginx/files/nginx.pam @@ -0,0 +1,26 @@ + +# +auth [success=2 default=ignore] pam_unix.so nullok_secure +auth [success=1 default=ignore] pam_ldap.so +auth requisite pam_deny.so +auth required pam_permit.so + +# +account [success=2 new_authtok_reqd=done default=ignore] pam_unix.so +account [success=1 default=ignore] pam_ldap.so +account requisite pam_deny.so +account required pam_permit.so + +# +password [success=1 default=ignore] pam_unix.so obscure sha512 +password [success=1 user_unknown=ignore default=die] pam_ldap.so use_authtok try_first_pass +password requisite pam_deny.so +password required pam_permit.so + +# +session [default=1] 
pam_permit.so +session requisite pam_deny.so +session required pam_permit.so +session optional pam_umask.so +session required pam_unix.so +session optional pam_ldap.so diff --git a/nginx/handlers/main.yml b/nginx/handlers/main.yml new file mode 100644 index 00000000..04c7fb28 --- /dev/null +++ b/nginx/handlers/main.yml @@ -0,0 +1,7 @@ +--- +- name: Reload nginx + service: name=nginx state=reloaded + +- name: Restart nginx + service: name=nginx state=restarted + diff --git a/nginx/meta/main.yml b/nginx/meta/main.yml new file mode 100644 index 00000000..b20d9ba9 --- /dev/null +++ b/nginx/meta/main.yml @@ -0,0 +1,4 @@ +--- +dependencies: + - role: '../../library/roles/ldap-client-config' + when: nginx_use_ldap_pam_auth diff --git a/nginx/tasks/main.yml b/nginx/tasks/main.yml new file mode 100644 index 00000000..69e2426f --- /dev/null +++ b/nginx/tasks/main.yml @@ -0,0 +1,3 @@ +--- +- include: nginx.yml +- include: pam-ldap.yml diff --git a/nginx/tasks/nginx.yml b/nginx/tasks/nginx.yml new file mode 100644 index 00000000..cf62d3f3 --- /dev/null +++ b/nginx/tasks/nginx.yml @@ -0,0 +1,28 @@ +--- +- name: Install the nginx web server + apt: pkg={{ item }} state=installed + with_items: + - nginx-full + when: not nginx_use_ldap_pam_auth + tags: + - nginx + +- name: Install the nginx web server if we need ldap auth via pam + apt: pkg={{ item }} state=installed + with_items: + - nginx-extras + when: nginx_use_ldap_pam_auth + tags: + - nginx + +- name: remove nginx default config + file: dest=/etc/nginx/sites-enabled/default state=absent + notify: + Reload nginx + tags: + - nginx + +- name: Ensure that the webserver is running + service: name=nginx state=started enabled={{ nginx_enabled }} + tags: + - nginx diff --git a/nginx/tasks/pam-ldap.yml b/nginx/tasks/pam-ldap.yml new file mode 100644 index 00000000..9f37f629 --- /dev/null +++ b/nginx/tasks/pam-ldap.yml @@ -0,0 +1,8 @@ +--- +- name: Install pam service for nginx + copy: src=nginx.pam dest=/etc/pam.d/{{ 
nginx_pam_svc_name }} + notify: Reload nginx + when: nginx_use_ldap_pam_auth + tags: + - nginx + diff --git a/nginx/templates/ldap.conf.j2 b/nginx/templates/ldap.conf.j2 new file mode 100644 index 00000000..fba620ac --- /dev/null +++ b/nginx/templates/ldap.conf.j2 @@ -0,0 +1,11 @@ +# The distinguished name of the search base. +base {{ nginx_ldap_base_dn }} + +# Another way to specify your LDAP server is to provide an +uri {{ nginx_ldap_uri }} + +# The LDAP version to use (defaults to 3 +# if supported by client library) +ldap_version 3 + +nss_initgroups_ignoreusers avahi,backup,bin,daemon,games,gnats,irc,libuuid,list,lp,mail,man,messagebus,munin,news,nslcd,proxy,root,rstudio-server,sshd,sync,sys,syslog,uucp,www-data diff --git a/openjdk/defaults/main.yml b/openjdk/defaults/main.yml new file mode 100644 index 00000000..fa0ecd68 --- /dev/null +++ b/openjdk/defaults/main.yml @@ -0,0 +1,8 @@ +--- +openjdk_default: 7 +openjdk_pkg_state: installed +openjdk_version: + - '{{ openjdk_default }}' + +jdk_java_home: '/usr/lib/jvm/openjdk-{{ openjdk_default }}' + diff --git a/openjdk/handlers/main.yml b/openjdk/handlers/main.yml new file mode 100644 index 00000000..94fd652f --- /dev/null +++ b/openjdk/handlers/main.yml @@ -0,0 +1,14 @@ +--- +- name: Set OpenJDK 6 as default + shell: update-java-alternatives -s java-1.6.0-openjdk-amd64 + +- name: Set OpenJDK 7 as default + shell: update-java-alternatives -s java-1.7.0-openjdk-amd64 + +- name: Set OpenJDK 8 as default + shell: update-java-alternatives -s java-1.8.0-openjdk-amd64 + +- name: Set the default OpenJDK + shell: update-java-alternatives -s java-'{{ item }}'-openjdk-amd64 + with_items: openjdk_default + diff --git a/openjdk/tasks/main.yml b/openjdk/tasks/main.yml new file mode 100644 index 00000000..fb90c148 --- /dev/null +++ b/openjdk/tasks/main.yml @@ -0,0 +1,22 @@ +--- +- name: Update the apt cache, if needed + apt: update_cache=yes + when: openjdk_pkg_state == 'latest' + tags: + - jdk + +- name: install OpenJDK JRE + 
apt: pkg=openjdk-{{ item.0 }}-{{ item[1] }} state={{ openjdk_pkg_state }} + with_nested: + - openjdk_version + - [ 'jre', 'jre-headless', 'jdk' ] + notify: + Set the default OpenJDK + tags: + - jdk + +- name: Set fact jdk_installed + set_fact: jdk_installed=true + tags: + - jdk + diff --git a/oracle-jdk/defaults/main.yml b/oracle-jdk/defaults/main.yml new file mode 100644 index 00000000..d628bbc7 --- /dev/null +++ b/oracle-jdk/defaults/main.yml @@ -0,0 +1,12 @@ +--- +# Supported versions: 6,7,8 +jdk_default: 7 +# jdk_version is a dictionary because it's possible to install more than one version +jdk_version: + - '{{ jdk_default }}' +jdk_java_home: '/usr/lib/jvm/java-{{ jdk_default }}-oracle' +jdk_pkg_state: installed +# If we want a different oracle jdk set the following variables in the local playbook: +# jdk_java_home: /usr/lib/jvm/java-7-0-25 +# jdk_use_tarfile: True +# jdk_tarfile: oracle-jdk-7.0.25.tar.gz diff --git a/oracle-jdk/files/oracle-jdk-7.0.25.tar.gz b/oracle-jdk/files/oracle-jdk-7.0.25.tar.gz new file mode 100644 index 00000000..18f0ddfa Binary files /dev/null and b/oracle-jdk/files/oracle-jdk-7.0.25.tar.gz differ diff --git a/oracle-jdk/handlers/main.yml b/oracle-jdk/handlers/main.yml new file mode 100644 index 00000000..04eb05c0 --- /dev/null +++ b/oracle-jdk/handlers/main.yml @@ -0,0 +1,17 @@ +- name: Set Oracle JDK 6 as default + shell: update-java-alternatives -s java-6-oracle + +- name: Set Oracle JDK 7 as default + shell: update-java-alternatives -s java-7-oracle + +- name: Set Oracle JDK 8 as default + shell: update-java-alternatives -s java-8-oracle + +- name: Set the default Oracle JDK + command: update-java-alternatives -s java-{{ item }}-oracle + with_items: jdk_default + +- name: Update apt cache + apt: update_cache=yes + ignore_errors: true + diff --git a/oracle-jdk/tasks/main.yml b/oracle-jdk/tasks/main.yml new file mode 100644 index 00000000..bdbd1b59 --- /dev/null +++ b/oracle-jdk/tasks/main.yml @@ -0,0 +1,53 @@ +--- +- name: setup 
the Oracle JDK repository + apt_repository: repo='ppa:webupd8team/java' + register: update_apt_cache + tags: + - jdk + +# Set the Oracle JDK license as accepted before installing the package, to avoid the confirmation box at configuration time. +# - name: Accept the Oracle Java license +# raw: echo 'oracle-java{{ item }}-installer shared/accepted-oracle-license-v1-1 select true' | debconf-set-selections +# with_items: jdk_version +# tags: +# - jdk + +- name: Accept the Oracle Java license + debconf: name='oracle-java{{ item }}-installer' question='shared/accepted-oracle-license-v1-1' value='true' vtype='select' + with_items: jdk_version + tags: + - jdk + +- name: Update the apt cache + apt: update_cache=yes + when: update_apt_cache.changed or jdk_pkg_state == 'latest' + tags: + - jdk + +- name: Install the latest version of Oracle JDK + apt: pkg=oracle-java{{ item }}-installer state={{ jdk_pkg_state }} force=yes + when: jdk_use_tarfile is not defined or not jdk_use_tarfile + with_items: jdk_version + tags: + - jdk + +- name: Set the JDK default via update-alternatives + apt: pkg=oracle-java{{ item }}-set-default state={{ jdk_pkg_state }} force=yes + with_items: jdk_default + when: jdk_use_tarfile is not defined or not jdk_use_tarfile + notify: + Set the default Oracle JDK + when: jdk_default is defined + tags: + - jdk + +- name: Install a custom version of Oracle JDK from a tar file + unarchive: src={{ jdk_tarfile }} dest={{ jdk_java_home_prefix }} + when: jdk_use_tarfile is defined and jdk_use_tarfile + tags: + - jdk + +- name: Set fact jdk_installed + set_fact: jdk_installed=True + tags: + - jdk diff --git a/php-fpm/defaults/main.yml b/php-fpm/defaults/main.yml new file mode 100644 index 00000000..04048430 --- /dev/null +++ b/php-fpm/defaults/main.yml @@ -0,0 +1,60 @@ +--- +# +# IMPORTANT: the template will be used on a task that refers 'phpfpm_pools' inside a 'with_items' loop. 
So +# the variables into the template are all 'item.XXX' +# +phpfpm_service_enabled: True +phpfpm_remove_php_module: True + +phpfpm_base_dir: /etc/php5/fpm +php_fpm_packages: + - php5-fpm + +# Main confign file settings +# It can be 'syslog' +phpfpm_logdir: /var/log/php-fpm +phpfpm_error_logfile: /var/log/php5-fpm.log +phpfpm_syslog_facility: daemon +phpfpm_syslog_ident: php-fpm +phpfpm_log_level: notice +phpfpm_emergency_restart_threshold: 5 +phpfpm_emergency_restart_interval: 2m +phpfpm_process_control_timeout: 10s +phpfpm_set_process_max: False +phpfpm_process_max: 256 +phpfpm_set_event_mechanism: False +phpfpm_event_mechanism: epoll + + +# Pools settings +phpfpm_default_pool_name: "www" +phpfpm_remove_default_pool: False +phpfpm_use_default_template: True +phpfpm_create_users: False +phpfpm_default_user: www-data +phpfpm_default_group: www-data +phpfpm_default_listen: "127.0.0.1:9000" +phpfpm_default_allowed_clients: "127.0.0.1" +phpfpm_default_pm: "dynamic" +phpfpm_default_pm_max_children: "50" +phpfpm_default_pm_start_servers: "3" +phpfpm_default_pm_min_spare_servers: "1" +phpfpm_default_pm_max_spare_servers: "10" +phpfpm_default_pm_max_requests: "10000" +phpfpm_default_pm_status_enabled: False +phpfpm_default_pm_status_path: "/status" +phpfpm_default_ping_enabled: False +phpfpm_default_ping_path: "/ping" +phpfpm_default_ping_response: '{{ phpfpm_default_pool_name }}' +phpfpm_default_display_errors: "off" +phpfpm_default_log_errors: "on" +phpfpm_default_memory_limit: "64M" +phpfpm_default_request_terminate_timeout: "60s" +phpfpm_default_slowlog_timeout: "20s" +phpfpm_default_rlimit_files: "1024" +phpfpm_default_extensions: ".php" +phpfpm_default_context: '/' +phpfpm_session_prefix: '/var/lib/php5' + +phpfpm_pools: + - { pool_name: '{{ phpfpm_default_pool_name }}', app_context: '{{ phpfpm_default_context }}', user: '{{ phpfpm_default_user }}', group: '{{ phpfpm_default_group }}', listen: '{{ phpfpm_default_listen }}', allowed_clients: '{{ 
phpfpm_default_allowed_clients }}', pm: '{{ phpfpm_default_pm }}', pm_max_children: '{{ phpfpm_default_pm_max_children }}', pm_start_servers: '{{ phpfpm_default_pm_start_servers }}', pm_min_spare: '{{ phpfpm_default_pm_min_spare_servers }}', pm_max_spare: '{{ phpfpm_default_pm_max_spare_servers }}', pm_max_requests: '{{ phpfpm_default_pm_max_requests }}', pm_status_enabled: '{{ phpfpm_default_pm_status_enabled }}', pm_status_path: '{{ phpfpm_default_pm_status_path }}', ping_enabled: '{{ phpfpm_default_ping_enabled }}', ping_path: '{{ phpfpm_default_ping_path }}', ping_response: '{{ phpfpm_default_ping_response }}', display_errors: '{{ phpfpm_default_display_errors }}', log_errors: '{{ phpfpm_default_log_errors }}', memory_limit: '{{ phpfpm_default_memory_limit }}', slowlog_timeout: '{{ phpfpm_default_slowlog_timeout }}', rlimit_files: '{{ phpfpm_default_rlimit_files }}', php_extensions: '{{ phpfpm_default_extensions }}' } diff --git a/php-fpm/handlers/main.yml b/php-fpm/handlers/main.yml new file mode 100644 index 00000000..9fbf7053 --- /dev/null +++ b/php-fpm/handlers/main.yml @@ -0,0 +1,6 @@ +--- +- name: Reload php-fpm + service: name=php5-fpm state=reloaded + +- name: Restart php-fpm + service: name=php5-fpm state=restarted diff --git a/php-fpm/tasks/main.yml b/php-fpm/tasks/main.yml new file mode 100644 index 00000000..73f8cba7 --- /dev/null +++ b/php-fpm/tasks/main.yml @@ -0,0 +1,76 @@ +--- +# php as a standalone service +- name: The nagios and ganglia web interfaces use php-fpm + apt: pkg={{ item }} state=present + with_items: php_fpm_packages + tags: + - php + +- name: Set the timezone if we have one + ini_file: dest={{ phpfpm_base_dir }}/php.ini section=Date option=date.timezone value={{ timezone }} backup=yes + when: timezone is defined + notify: Reload php-fpm + tags: + - php + - php_ini + +- name: remove php-fpm default pool + file: dest={{ phpfpm_base_dir }}/pool.d/www.conf state=absent + when: phpfpm_remove_default_pool + notify: Restart php-fpm + 
tags: + - php + +- name: Create the users under the php-fpm processes will run + user: name={{ item.user }} comment="{{ item.user }}" home=/dev/null createhome=no shell=/sbin/nologin + with_items: phpfpm_pools + when: phpfpm_create_users + notify: Restart php-fpm + tags: + - php + - fpm_pool + +- name: Create the directories where to store the sessions files. One for each pool + file: dest={{ phpfpm_session_prefix }}/{{ item.pool_name }} owner={{ item.user }} group=root mode=0750 state=directory + with_items: phpfpm_pools + when: phpfpm_use_default_template + tags: + - php + - fpm_pool + +- name: Create the directories where to store the log files + file: dest={{ phpfpm_logdir }} owner=root group=root mode=0750 state=directory + tags: + - php + - fpm_pool + +- name: Install the php-fpm logrotate file + template: src=php-fpm.logrotate.j2 dest=/etc/logrotate.d/php-fpm owner=root group=root mode=0444 + tags: + - php + - fpm_conf + +- name: Install the php-fpm main config file + template: src=php-fpm.conf.j2 dest={{ phpfpm_base_dir }}/php-fpm.conf owner=root group=root mode=0444 + notify: Restart php-fpm + tags: + - php + - fpm_pool + - fpm_conf + +- name: Install the php-fpm pools + template: src=php-fpm-pool.conf.j2 dest={{ phpfpm_base_dir }}/pool.d/{{ item.pool_name }}.conf owner=root group=root mode=0444 + with_items: phpfpm_pools + when: phpfpm_use_default_template + notify: Restart php-fpm + tags: + - php + - fpm_conf + - fpm_pool + - fpm_pool_conf + +- name: Ensure that the php-fpm service is started and enabled + service: name=php5-fpm state=started enabled=yes + tags: + - php + diff --git a/php-fpm/templates/php-fpm-pool.conf.j2 b/php-fpm/templates/php-fpm-pool.conf.j2 new file mode 100644 index 00000000..d6847432 --- /dev/null +++ b/php-fpm/templates/php-fpm-pool.conf.j2 @@ -0,0 +1,293 @@ +; Start a new pool named 'www'. +[{{ item.pool_name }}] + +; The address on which to accept FastCGI requests. 
+; Valid syntaxes are: +; 'ip.add.re.ss:port' - to listen on a TCP socket to a specific address on +; a specific port; +; 'port' - to listen on a TCP socket to all addresses on a +; specific port; +; '/path/to/unix/socket' - to listen on a unix socket. +; Note: This value is mandatory. +listen = {{ item.listen }} + +; Set listen(2) backlog. A value of '-1' means unlimited. +; Default Value: -1 +;listen.backlog = -1 + +; List of ipv4 addresses of FastCGI clients which are allowed to connect. +; Equivalent to the FCGI_WEB_SERVER_ADDRS environment variable in the original +; PHP FCGI (5.2.2+). Makes sense only with a tcp listening socket. Each address +; must be separated by a comma. If this value is left blank, connections will be +; accepted from any ip address. +; Default Value: any +listen.allowed_clients = {{ item.allowed_clients }} + +; Set permissions for unix socket, if one is used. In Linux, read/write +; permissions must be set in order to allow connections from a web server. Many +; BSD-derived systems allow connections regardless of permissions. +; Default Values: user and group are set as the running user +; mode is set to 0666 +;listen.owner = nobody +;listen.group = nobody +;listen.mode = 0666 + +; Unix user/group of processes +; Note: The user is mandatory. If the group is not set, the default user's group +; will be used. +; RPM: apache Choosed to be able to access some dir as httpd +user = {{ item.user }} +; RPM: Keep a group allowed to write in log dir. +group = {{ item.group }} + +; Choose how the process manager will control the number of child processes. +; Possible Values: +; static - a fixed number (pm.max_children) of child processes; +; dynamic - the number of child processes are set dynamically based on the +; following directives: +; pm.max_children - the maximum number of children that can +; be alive at the same time. +; pm.start_servers - the number of children created on startup. 
+; pm.min_spare_servers - the minimum number of children in 'idle' +; state (waiting to process). If the number +; of 'idle' processes is less than this +; number then some children will be created. +; pm.max_spare_servers - the maximum number of children in 'idle' +; state (waiting to process). If the number +; of 'idle' processes is greater than this +; number then some children will be killed. +; Note: This value is mandatory. +pm = {{ item.pm }} + +; The number of child processes to be created when pm is set to 'static' and the +; maximum number of child processes to be created when pm is set to 'dynamic'. +; This value sets the limit on the number of simultaneous requests that will be +; served. Equivalent to the ApacheMaxClients directive with mpm_prefork. +; Equivalent to the PHP_FCGI_CHILDREN environment variable in the original PHP +; CGI. +; Note: Used when pm is set to either 'static' or 'dynamic' +; Note: This value is mandatory. +pm.max_children = {{ item.pm_max_children }} + +; The number of child processes created on startup. +; Note: Used only when pm is set to 'dynamic' +; Default Value: min_spare_servers + (max_spare_servers - min_spare_servers) / 2 +pm.start_servers = {{ item.pm_start_servers }} + +; The desired minimum number of idle server processes. +; Note: Used only when pm is set to 'dynamic' +; Note: Mandatory when pm is set to 'dynamic' +pm.min_spare_servers = {{ item.pm_min_spare }} + +; The desired maximum number of idle server processes. +; Note: Used only when pm is set to 'dynamic' +; Note: Mandatory when pm is set to 'dynamic' +pm.max_spare_servers = {{ item.pm_max_spare }} + +; The number of requests each child process should execute before respawning. +; This can be useful to work around memory leaks in 3rd party libraries. For +; endless request processing specify '0'. Equivalent to PHP_FCGI_MAX_REQUESTS. +; Default Value: 0 +pm.max_requests = {{ item.pm_max_requests }} + +; The URI to view the FPM status page. 
If this value is not set, no URI will be +; recognized as a status page. By default, the status page shows the following +; information: +; accepted conn - the number of request accepted by the pool; +; pool - the name of the pool; +; process manager - static or dynamic; +; idle processes - the number of idle processes; +; active processes - the number of active processes; +; total processes - the number of idle + active processes. +; The values of 'idle processes', 'active processes' and 'total processes' are +; updated each second. The value of 'accepted conn' is updated in real time. +; Example output: +; accepted conn: 12073 +; pool: www +; process manager: static +; idle processes: 35 +; active processes: 65 +; total processes: 100 +; By default the status page output is formatted as text/plain. Passing either +; 'html' or 'json' as a query string will return the corresponding output +; syntax. Example: +; http://www.foo.bar/status +; http://www.foo.bar/status?json +; http://www.foo.bar/status?html +; Note: The value must start with a leading slash (/). The value can be +; anything, but it may not be a good idea to use the .php extension or it +; may conflict with a real PHP file. +; Default Value: not set +{% if item.pm_status_enabled %} +pm.status_path = {{ item.pm_status_path }} +{% endif %} + +; The ping URI to call the monitoring page of FPM. If this value is not set, no +; URI will be recognized as a ping page. This could be used to test from outside +; that FPM is alive and responding, or to +; - create a graph of FPM availability (rrd or such); +; - remove a server from a group if it is not responding (load balancing); +; - trigger alerts for the operating team (24/7). +; Note: The value must start with a leading slash (/). The value can be +; anything, but it may not be a good idea to use the .php extension or it +; may conflict with a real PHP file. 
+; Default Value: not set +{% if item.ping_enabled %} +ping.path = {{ item.ping_path }} +{% endif %} + +; This directive may be used to customize the response of a ping request. The +; response is formatted as text/plain with a 200 response code. +; Default Value: pong +{% if item.ping_enabled %} +ping.response = {{ item.ping_response }} +{% endif %} + +access.log = /var/log/php-fpm/$pool-access.log + +; The access log format. +; The following syntax is allowed +; %%: the '%' character +; %C: %CPU used by the request +; it can accept the following format: +; - %{user}C for user CPU only +; - %{system}C for system CPU only +; - %{total}C for user + system CPU (default) +; %d: time taken to serve the request +; it can accept the following format: +; - %{seconds}d (default) +; - %{miliseconds}d +; - %{mili}d +; - %{microseconds}d +; - %{micro}d +; %e: an environment variable (same as $_ENV or $_SERVER) +; it must be associated with embraces to specify the name of the env +; variable. Some exemples: +; - server specifics like: %{REQUEST_METHOD}e or %{SERVER_PROTOCOL}e +; - HTTP headers like: %{HTTP_HOST}e or %{HTTP_USER_AGENT}e +; %f: script filename +; %l: content-length of the request (for POST request only) +; %m: request method +; %M: peak of memory allocated by PHP +; it can accept the following format: +; - %{bytes}M (default) +; - %{kilobytes}M +; - %{kilo}M +; - %{megabytes}M +; - %{mega}M +; %n: pool name +; %o: ouput header +; it must be associated with embraces to specify the name of the header: +; - %{Content-Type}o +; - %{X-Powered-By}o +; - %{Transfert-Encoding}o +; - .... +; %p: PID of the child that serviced the request +; %P: PID of the parent of the child that serviced the request +; %q: the query string +; %Q: the '?' 
character if query string exists +; %r: the request URI (without the query string, see %q and %Q) +; %R: remote IP address +; %s: status (response code) +; %t: server time the request was received +; it can accept a strftime(3) format: +; %d/%b/%Y:%H:%M:%S %z (default) +; %T: time the log has been written (the request has finished) +; it can accept a strftime(3) format: +; %d/%b/%Y:%H:%M:%S %z (default) +; %u: remote user +; +; Default: "%R - %u %t \"%m %r\" %s" +;access.format = "%R - %u %t \"%m %r%Q%q\" %s %f %{mili}d %{kilo}M %C%%" + +; The timeout for serving a single request after which the worker process will +; be killed. This option should be used when the 'max_execution_time' ini option +; does not stop script execution for some reason. A value of '0' means 'off'. +; Available units: s(econds)(default), m(inutes), h(ours), or d(ays) +; Default Value: 0 +{% if item.req_term_timeout is defined %} +request_terminate_timeout = {{ item.req_term_timeout }} +{% else %} +request_terminate_timeout = {{ phpfpm_default_request_terminate_timeout }} +{% endif %} + +; The timeout for serving a single request after which a PHP backtrace will be +; dumped to the 'slowlog' file. A value of '0s' means 'off'. +; Available units: s(econds)(default), m(inutes), h(ours), or d(ays) +; Default Value: 0 +request_slowlog_timeout = {{ item.slowlog_timeout }} + +; The log file for slow requests +; Default Value: not set +; Note: slowlog is mandatory if request_slowlog_timeout is set +slowlog = /var/log/php-fpm/$pool-slow.log + +; Set open file descriptor rlimit. +; Default Value: system defined value +rlimit_files = {{ item.rlimit_files }} + +; Set max core size rlimit. +; Possible Values: 'unlimited' or an integer greater or equal to 0 +; Default Value: system defined value +;rlimit_core = 0 + +; Chroot to this directory at the start. This value must be defined as an +; absolute path. When this value is not set, chroot is not used. 
+; Note: chrooting is a great security feature and should be used whenever +; possible. However, all PHP paths will be relative to the chroot +; (error_log, sessions.save_path, ...). +; Default Value: not set +;chroot = + +; Chdir to this directory at the start. This value must be an absolute path. +; Default Value: current directory or / when chroot +;chdir = /var/www + +; Redirect worker stdout and stderr into main error log. If not set, stdout and +; stderr will be redirected to /dev/null according to FastCGI specs. +; Default Value: no +catch_workers_output = yes + +; Limits the extensions of the main script FPM will allow to parse. This can +; prevent configuration mistakes on the web server side. You should only limit +; FPM to .php extensions to prevent malicious users to use other extensions to +; exectute php code. +; Note: set an empty value to allow all extensions. +; Default Value: .php +security.limit_extensions = {{ item.php_extensions }} + +; Pass environment variables like LD_LIBRARY_PATH. All $VARIABLEs are taken from +; the current environment. +; Default Value: clean env +;env[HOSTNAME] = $HOSTNAME +;env[PATH] = /usr/local/bin:/usr/bin:/bin +;env[TMP] = /tmp +;env[TMPDIR] = /tmp +;env[TEMP] = /tmp + +; Additional php.ini defines, specific to this pool of workers. These settings +; overwrite the values previously defined in the php.ini. The directives are the +; same as the PHP SAPI: +; php_value/php_flag - you can set classic ini defines which can +; be overwritten from PHP call 'ini_set'. +; php_admin_value/php_admin_flag - these directives won't be overwritten by +; PHP call 'ini_set' +; For php_*flag, valid values are on, off, 1, 0, true, false, yes or no. + +; Defining 'extension' will load the corresponding shared extension from +; extension_dir. Defining 'disable_functions' or 'disable_classes' will not +; overwrite previously defined php.ini values, but will append the new value +; instead. 
+ +; Default Value: nothing is defined by default except the values in php.ini and +; specified at startup with the -d argument +;php_admin_value[sendmail_path] = /usr/sbin/sendmail -t -i -f www@my.domain.com +php_flag[display_errors] = {{ item.display_errors }} +php_admin_value[error_log] = {{ phpfpm_logdir }}/$pool-error.log +php_admin_flag[log_errors] = {{ item.log_errors }} +php_admin_value[memory_limit] = {{ item.memory_limit }} + +; Set session path to a directory owned by process user +php_value[session.save_handler] = files +php_value[session.save_path] = {{ phpfpm_session_prefix }}/{{ item.pool_name }} + diff --git a/php-fpm/templates/php-fpm.conf.j2 b/php-fpm/templates/php-fpm.conf.j2 new file mode 100644 index 00000000..836936b4 --- /dev/null +++ b/php-fpm/templates/php-fpm.conf.j2 @@ -0,0 +1,123 @@ +;;;;;;;;;;;;;;;;;;;;; +; FPM Configuration ; +;;;;;;;;;;;;;;;;;;;;; + +; All relative paths in this configuration file are relative to PHP's install +; prefix. + +; Include one or more files. If glob(3) exists, it is used to include a bunch of +; files from a glob(3) pattern. This directive can be used everywhere in the +; file. +;include={{ phpfpm_base_dir }}/*.conf + +;;;;;;;;;;;;;;;;;; +; Global Options ; +;;;;;;;;;;;;;;;;;; + +[global] +; Pid file +; Default Value: none +pid = /var/run/php5-fpm.pid + +; Error log file +; If it's set to "syslog", log is sent to syslogd instead of being written +; in a local file. +; Default Value: /var/log/php-fpm.log +error_log = {{ phpfpm_error_logfile }} + +{% if phpfpm_error_logfile == 'syslog' %} +; syslog_facility is used to specify what type of program is logging the +; message. This lets syslogd specify that messages from different facilities +; will be handled differently. +; See syslog(3) for possible values (ex daemon equiv LOG_DAEMON) +; Default Value: daemon +syslog.facility = {{ phpfpm_syslog_facility }} + +; syslog_ident is prepended to every message. 
If you have multiple FPM +; instances running on the same server, you can change the default value +; which must suit common needs. +; Default Value: php-fpm +syslog.ident = {{ phpfpm_syslog_ident }} +{% endif %} + +; Log level +; Possible Values: alert, error, warning, notice, debug +; Default Value: notice +log_level = {{ phpfpm_log_level }} + +; If this number of child processes exit with SIGSEGV or SIGBUS within the time +; interval set by emergency_restart_interval then FPM will restart. A value +; of '0' means 'Off'. +; Default Value: 0 +emergency_restart_threshold = {{ phpfpm_emergency_restart_threshold }} + +; Interval of time used by emergency_restart_interval to determine when +; a graceful restart will be initiated. This can be useful to work around +; accidental corruptions in an accelerator's shared memory. +; Available Units: s(econds), m(inutes), h(ours), or d(ays) +; Default Unit: seconds +; Default Value: 0 +emergency_restart_interval = {{ phpfpm_emergency_restart_interval }} + +; Time limit for child processes to wait for a reaction on signals from master. +; Available units: s(econds), m(inutes), h(ours), or d(ays) +; Default Unit: seconds +; Default Value: 0 +process_control_timeout = {{ phpfpm_process_control_timeout }} + +; The maximum number of processes FPM will fork. This has been design to control +; the global number of processes when using dynamic PM within a lot of pools. +; Use it with caution. +; Note: A value of 0 indicates no limit +; Default Value: 0 +{% if phpfpm_set_process_max %} +process.max = {{ phpfpm_process_max }} +{% endif %} + +; Specify the nice(2) priority to apply to the master process (only if set) +; The value can vary from -19 (highest priority) to 20 (lower priority) +; Note: - It will only work if the FPM master process is launched as root +; - The pool process will inherit the master process priority +; unless it specified otherwise +; Default Value: no set +;process.priority = -19 + +; Send FPM to background. 
Set to 'no' to keep FPM in foreground for debugging. +; Default Value: yes +daemonize = yes + +; Set open file descriptor rlimit for the master process. +; Default Value: system defined value +;rlimit_files = 1024 + +; Set max core size rlimit for the master process. +; Possible Values: 'unlimited' or an integer greater or equal to 0 +; Default Value: system defined value +;rlimit_core = 0 + +{% if phpfpm_set_event_mechanism %} +; Specify the event mechanism FPM will use. The following is available: +; - select (any POSIX os) +; - poll (any POSIX os) +; - epoll (linux >= 2.5.44) +; Default Value: not set (auto detection) +events.mechanism = {{ phpfpm_event_mechanism }} +{% endif %} + +; When FPM is build with systemd integration, specify the interval, +; in second, between health report notification to systemd. +; Set to 0 to disable. +; Available Units: s(econds), m(inutes), h(ours) +; Default Unit: seconds +; Default value: 10 +systemd_interval = 10 + +;;;;;;;;;;;;;;;;;;;; +; Pool Definitions ; +;;;;;;;;;;;;;;;;;;;; + +; See /etc/php-fpm.d/*.conf + +; To configure the pools it is recommended to have one .conf file per +; pool in the following directory: +include={{ phpfpm_base_dir }}/pool.d/*.conf diff --git a/php-fpm/templates/php-fpm.logrotate.j2 b/php-fpm/templates/php-fpm.logrotate.j2 new file mode 100644 index 00000000..2408a785 --- /dev/null +++ b/php-fpm/templates/php-fpm.logrotate.j2 @@ -0,0 +1,10 @@ +{{ phpfpm_logdir}}/*log { + missingok + notifempty + sharedscripts + delaycompress + postrotate + /etc/init.d/php5-fpm reload 2>/dev/null || true + endscript +} + diff --git a/postfix-relay/defaults/main.yml b/postfix-relay/defaults/main.yml new file mode 100644 index 00000000..6e63d37e --- /dev/null +++ b/postfix-relay/defaults/main.yml @@ -0,0 +1,24 @@ +postfix_biff: "no" +postfix_append_dot_mydomain: "no" +postfix_use_relay_host: True + +postfix_use_sasl_auth: True +postfix_smtp_sasl_auth_enable: "yes" +postfix_smtp_create_relay_user: True +# See 
vars/isti-global.yml +postfix_relay_host: smtp-relay.research-infrastructures.eu +postfix_relay_port: 587 +#postfix_smtp_relay_user: smtp-user +# This one has to be set inside a vault file +#postfix_smtp_relay_pwd: set_you_password_here_in_a_vault_encrypted_file + +# The following options are used only whe postfix_relay_server is set to True +postfix_relay_server: False +#postfix_mynetworks: '{{ network.nmis }}, hash:/etc/postfix/network_table' +postfix_mynetworks: hash:/etc/postfix/network_table +postfix_interfaces: all +postfix_inet_protocols: all +postfix_message_size_limit: 10240000 + +postfix_sasl_packages: + - sasl2-bin diff --git a/postfix-relay/files/sasl_smtpd.conf b/postfix-relay/files/sasl_smtpd.conf new file mode 100644 index 00000000..1216a465 --- /dev/null +++ b/postfix-relay/files/sasl_smtpd.conf @@ -0,0 +1,3 @@ +pwcheck_method: saslauthd +mech_list: PLAIN LOGIN + diff --git a/postfix-relay/handlers/main.yml b/postfix-relay/handlers/main.yml new file mode 100644 index 00000000..547c6bb8 --- /dev/null +++ b/postfix-relay/handlers/main.yml @@ -0,0 +1,17 @@ +- name: Update SASL hash + shell: postmap hash:/etc/postfix/sasl_passwd + +- name: Reload postfix + service: name=postfix state=reloaded + +- name: Restart postfix + service: name=postfix state=restarted + +- name: Update the network hash table + shell: postmap hash:/etc/postfix/network_table + +- name: start saslauth daemon + service: name=saslauthd state=started enabled=yes + +- name: restart saslauth daemon + service: name=saslauthd state=restarted diff --git a/postfix-relay/tasks/main.yml b/postfix-relay/tasks/main.yml new file mode 100644 index 00000000..c0a9445f --- /dev/null +++ b/postfix-relay/tasks/main.yml @@ -0,0 +1,6 @@ +--- +- include: smtp-common-packages.yml +- include: smtp-sasl-auth.yml + when: postfix_use_sasl_auth +- include: postfix-relay-server.yml + when: postfix_relay_server diff --git a/postfix-relay/tasks/postfix-relay-server.yml 
b/postfix-relay/tasks/postfix-relay-server.yml new file mode 100644 index 00000000..fae5c921 --- /dev/null +++ b/postfix-relay/tasks/postfix-relay-server.yml @@ -0,0 +1,55 @@ +--- +- name: Write the network hash file + template: src=network_table.j2 dest=/etc/postfix/network_table owner=root group=root mode=0444 + when: postfix_relay_server + notify: Update the network hash table + tags: + - postfix-relay + +- name: Activate the submission port on the postfix master file + template: src=postfix-master.cf.j2 dest=/etc/postfix/master.cf owner=root group=root mode=0444 + when: postfix_relay_server + notify: Restart postfix + tags: + - postfix-relay + +- name: Install the sasl2 authentication infrastructure + apt: pkg={{ item }} state=installed + with_items: postfix_sasl_packages + when: postfix_relay_server + tags: + - postfix-relay + +- name: Create the sasl directory inside /etc/postfix + file: dest=/etc/postfix/sasl state=directory owner=root group=root mode=0555 + when: postfix_relay_server + tags: + - postfix-relay + +- name: Install the smtpd.conf file inside inside /etc/postfix/sasl + copy: src=sasl_smtpd.conf dest=/etc/postfix/sasl/smtpd.conf owner=root group=root mode=0444 + when: postfix_relay_server + tags: + - postfix-relay + +- name: Enable the saslauth daemon + action: configfile path=/etc/default/saslauthd key=START value='yes' syntax=shell + when: postfix_relay_server + notify: start saslauth daemon + tags: + - postfix-relay + +- name: Change the socket path because postfix on debian runs inside a chroot jail + action: configfile path=/etc/default/saslauthd key=OPTIONS value='"-c -m /var/spool/postfix/var/run/saslauthd"' syntax=shell + when: postfix_relay_server + notify: restart saslauth daemon + tags: + - postfix-relay + +- name: Assign the sasl group to the postfix user so that postfix can use the saslauthd socket + user: name=postfix groups='sasl' + when: postfix_relay_server + notify: Restart postfix + tags: + - postfix-relay + diff --git 
a/postfix-relay/tasks/smtp-common-packages.yml b/postfix-relay/tasks/smtp-common-packages.yml new file mode 100644 index 00000000..6c1c5fe4 --- /dev/null +++ b/postfix-relay/tasks/smtp-common-packages.yml @@ -0,0 +1,15 @@ +--- +- name: Install postfix and libsas to do mail relay + action: apt pkg={{ item }} state=present + with_items: + - postfix + - libsasl2-2 + tags: + - postfix-relay + +- name: Write the postfix main configuration file + template: src=main.cf.j2 dest=/etc/postfix/main.cf owner=root group=root mode=0444 + notify: Restart postfix + tags: + - postfix-relay + diff --git a/postfix-relay/tasks/smtp-sasl-auth.yml b/postfix-relay/tasks/smtp-sasl-auth.yml new file mode 100644 index 00000000..44ccf945 --- /dev/null +++ b/postfix-relay/tasks/smtp-sasl-auth.yml @@ -0,0 +1,8 @@ +--- +- name: Write sasl hash file + template: src=sasl_passwd.j2 dest=/etc/postfix/sasl_passwd owner=root group=root mode=0400 + when: postfix_use_sasl_auth + notify: Update SASL hash + tags: + - postfix-relay + diff --git a/postfix-relay/templates/main.cf.j2 b/postfix-relay/templates/main.cf.j2 new file mode 100644 index 00000000..cc4467d0 --- /dev/null +++ b/postfix-relay/templates/main.cf.j2 @@ -0,0 +1,83 @@ +# See /usr/share/postfix/main.cf.dist for a commented, more complete version + + +# Debian specific: Specifying a file name will cause the first +# line of that file to be used as the name. The Debian default +# is /etc/mailname. +#myorigin = /etc/mailname + +smtpd_banner = $myhostname ESMTP $mail_name +biff = {{ postfix_biff }} + +# appending .domain is the MUA's job. 
+append_dot_mydomain = {{ postfix_append_dot_mydomain }} + +# Uncomment the next line to generate "delayed mail" warnings +#delay_warning_time = 4h + +readme_directory = no + +# TLS parameters +# Server +smtpd_tls_cert_file=/etc/ssl/certs/ssl-cert-snakeoil.pem +smtpd_tls_key_file=/etc/ssl/private/ssl-cert-snakeoil.key +smtpd_tls_session_cache_database = btree:${data_directory}/smtpd_scache +{% if postfix_relay_server %} +smtpd_tls_security_level = encrypt +smtpd_tls_auth_only = yes +{% endif %} +smtpd_use_tls=yes +# Client +smtp_tls_security_level = encrypt +smtp_tls_session_cache_database = btree:${data_directory}/smtp_scache + +# See /usr/share/doc/postfix/TLS_README.gz in the postfix-doc package for +# information on enabling SSL in the smtp client. + +myhostname = {{ ansible_fqdn }} +alias_maps = hash:/etc/aliases +alias_database = hash:/etc/aliases +myorigin = /etc/mailname +mydestination = {{ ansible_fqdn }}, localhost +{% if postfix_use_relay_host %} +relayhost = {{ postfix_relay_host }}:{{ postfix_relay_port }} +{% endif %} +{% if not postfix_relay_server %} +mynetworks = 127.0.0.1 +inet_interfaces = localhost, ip6-localhost +inet_protocols = ipv4 +{% endif %} +mailbox_size_limit = 0 +recipient_delimiter = + +{% if postfix_use_sasl_auth %} +smtp_sasl_auth_enable= {{ postfix_smtp_sasl_auth_enable }} +smtp_sasl_password_maps = hash:/etc/postfix/sasl_passwd +smtp_sasl_security_options = noanonymous +smtp_sasl_tls_security_options = noanonymous +smtp_sasl_mechanism_filter = plain, login +{% endif %} +{% if postfix_relay_server %} +smtpd_sasl_path = smtpd +smtpd_sasl_auth_enable = yes +smtpd_sasl_security_options = noanonymous +smtpd_sasl_tls_security_options = noanonymous +smtpd_helo_required = yes +mynetworks = {{ postfix_mynetworks }} +inet_interfaces = {{ postfix_interfaces }} +message_size_limit = {{ postfix_message_size_limit }} + +# Don't talk to mail systems that don't know their own hostname. 
+smtpd_helo_restrictions = reject_unknown_helo_hostname +# Block clients that speak too early. +smtpd_data_restrictions = reject_unauth_pipelining + +# Our internal servers talk to the submission port so they are treated as clients +smtpd_client_restrictions = permit_sasl_authenticated, reject + +# Don't accept mail from domains that don't exist. +smtpd_sender_restrictions = reject_unknown_sender_domain + +# Relay control: local clients and +# authenticated clients may specify any destination domain. +smtpd_relay_restrictions = permit_sasl_authenticated, reject +{% endif %} diff --git a/postfix-relay/templates/network_table.j2 b/postfix-relay/templates/network_table.j2 new file mode 100644 index 00000000..f0f548a3 --- /dev/null +++ b/postfix-relay/templates/network_table.j2 @@ -0,0 +1,4 @@ +127.0.0.0/8 OK +127.0.0.1 OK +127.0.1.1 OK +[::1] OK diff --git a/postfix-relay/templates/postfix-master.cf.j2 b/postfix-relay/templates/postfix-master.cf.j2 new file mode 100644 index 00000000..4b68d62a --- /dev/null +++ b/postfix-relay/templates/postfix-master.cf.j2 @@ -0,0 +1,123 @@ +# +# Postfix master process configuration file. For details on the format +# of the file, see the master(5) manual page (command: "man 5 master" or +# on-line: http://www.postfix.org/master.5.html). +# +# Do not forget to execute "postfix reload" after editing this file. 
+# +# ========================================================================== +# service type private unpriv chroot wakeup maxproc command + args +# (yes) (yes) (yes) (never) (100) +# ========================================================================== +smtp inet n - - - - smtpd +#smtp inet n - - - 1 postscreen +#smtpd pass - - - - - smtpd +#dnsblog unix - - - - 0 dnsblog +#tlsproxy unix - - - - 0 tlsproxy +submission inet n - - - - smtpd + -o syslog_name=postfix/submission + -o smtpd_tls_security_level=encrypt + -o smtpd_sasl_auth_enable=yes +# -o smtpd_reject_unlisted_recipient=no +# -o smtpd_client_restrictions=$mua_client_restrictions +# -o smtpd_helo_restrictions=$mua_helo_restrictions +# -o smtpd_sender_restrictions=$mua_sender_restrictions +# -o smtpd_recipient_restrictions= +# -o smtpd_relay_restrictions=permit_sasl_authenticated,reject +# -o milter_macro_daemon_name=ORIGINATING +#smtps inet n - - - - smtpd +# -o syslog_name=postfix/smtps +# -o smtpd_tls_wrappermode=yes +# -o smtpd_sasl_auth_enable=yes +# -o smtpd_reject_unlisted_recipient=no +# -o smtpd_client_restrictions=$mua_client_restrictions +# -o smtpd_helo_restrictions=$mua_helo_restrictions +# -o smtpd_sender_restrictions=$mua_sender_restrictions +# -o smtpd_recipient_restrictions= +# -o smtpd_relay_restrictions=permit_sasl_authenticated,reject +# -o milter_macro_daemon_name=ORIGINATING +#628 inet n - - - - qmqpd +pickup unix n - - 60 1 pickup +cleanup unix n - - - 0 cleanup +qmgr unix n - n 300 1 qmgr +#qmgr unix n - n 300 1 oqmgr +tlsmgr unix - - - 1000? 1 tlsmgr +rewrite unix - - - - - trivial-rewrite +bounce unix - - - - 0 bounce +defer unix - - - - 0 bounce +trace unix - - - - 0 bounce +verify unix - - - - 1 verify +flush unix n - - 1000? 
0 flush +proxymap unix - - n - - proxymap +proxywrite unix - - n - 1 proxymap +smtp unix - - - - - smtp +relay unix - - - - - smtp +# -o smtp_helo_timeout=5 -o smtp_connect_timeout=5 +showq unix n - - - - showq +error unix - - - - - error +retry unix - - - - - error +discard unix - - - - - discard +local unix - n n - - local +virtual unix - n n - - virtual +lmtp unix - - - - - lmtp +anvil unix - - - - 1 anvil +scache unix - - - - 1 scache +# +# ==================================================================== +# Interfaces to non-Postfix software. Be sure to examine the manual +# pages of the non-Postfix software to find out what options it wants. +# +# Many of the following services use the Postfix pipe(8) delivery +# agent. See the pipe(8) man page for information about ${recipient} +# and other message envelope options. +# ==================================================================== +# +# maildrop. See the Postfix MAILDROP_README file for details. +# Also specify in main.cf: maildrop_destination_recipient_limit=1 +# +maildrop unix - n n - - pipe + flags=DRhu user=vmail argv=/usr/bin/maildrop -d ${recipient} +# +# ==================================================================== +# +# Recent Cyrus versions can use the existing "lmtp" master.cf entry. +# +# Specify in cyrus.conf: +# lmtp cmd="lmtpd -a" listen="localhost:lmtp" proto=tcp4 +# +# Specify in main.cf one or more of the following: +# mailbox_transport = lmtp:inet:localhost +# virtual_transport = lmtp:inet:localhost +# +# ==================================================================== +# +# Cyrus 2.1.5 (Amos Gouaux) +# Also specify in main.cf: cyrus_destination_recipient_limit=1 +# +#cyrus unix - n n - - pipe +# user=cyrus argv=/cyrus/bin/deliver -e -r ${sender} -m ${extension} ${user} +# +# ==================================================================== +# Old example of delivery via Cyrus. 
+# +#old-cyrus unix - n n - - pipe +# flags=R user=cyrus argv=/cyrus/bin/deliver -e -m ${extension} ${user} +# +# ==================================================================== +# +# See the Postfix UUCP_README file for configuration details. +# +#uucp unix - n n - - pipe +# flags=Fqhu user=uucp argv=uux -r -n -z -a$sender - $nexthop!rmail ($recipient) +# +# Other external delivery methods. +# +#ifmail unix - n n - - pipe +# flags=F user=ftn argv=/usr/lib/ifmail/ifmail -r $nexthop ($recipient) +#bsmtp unix - n n - - pipe +# flags=Fq. user=bsmtp argv=/usr/lib/bsmtp/bsmtp -t$nexthop -f$sender $recipient +#scalemail-backend unix - n n - 2 pipe +# flags=R user=scalemail argv=/usr/lib/scalemail/bin/scalemail-store ${nexthop} ${user} ${extension} +#mailman unix - n n - - pipe +# flags=FR user=list argv=/usr/lib/mailman/bin/postfix-to-mailman.py +# ${nexthop} ${user} diff --git a/postfix-relay/templates/sasl_passwd.j2 b/postfix-relay/templates/sasl_passwd.j2 new file mode 100644 index 00000000..446cc249 --- /dev/null +++ b/postfix-relay/templates/sasl_passwd.j2 @@ -0,0 +1 @@ +{{ postfix_relay_host }}:{{ postfix_relay_port }} {{ postfix_smtp_relay_user }}:{{ postfix_smtp_relay_pwd }} diff --git a/postgresql/defaults/main.yml b/postgresql/defaults/main.yml new file mode 100644 index 00000000..add45505 --- /dev/null +++ b/postgresql/defaults/main.yml @@ -0,0 +1,72 @@ +--- + +pg_use_postgresql_org_repo: False + +# 9.3 is the default version for Ubuntu trusty +# It is highly recommended to use the postgresql.org repositories +# +# See the features matrix here: http://www.postgresql.org/about/featurematrix/ +# +psql_version: 9.3 +psql_db_host: localhost +psql_db_port: 5432 +psql_db_size_w: 150000000 +psql_db_size_c: 170000000 +psql_listen_on_ext_int: False +psql_use_alternate_data_dir: False +psql_data_dir: '/var/lib/postgresql/{{ psql_version }}' +psql_conf_parameters: + - { name: 'max_connections', value: '100', set: 'False' } + - { name: 'ssl', value: 'true', set: 
'False' } + - { name: 'shared_buffers', value: '24MB', set: 'False' } + - { name: 'temp_buffers', value: '8MB', set: 'False' } + - { name: 'work_mem', value: '1MB', set: 'False' } + - { name: 'maintenance_work_mem', value: '16MB', set: 'False' } + - { name: 'max_stack_depth', value: '2MB', set: 'False' } + - { name: 'wal_level', value: 'minimal', set: 'False' } + - { name: 'checkpoint_segments', value: '3', set: 'False' } + - { name: 'max_files_per_process', value: '1000', set: 'False' } + +psql_set_shared_memory: False +psql_sysctl_file: 30-postgresql-shm.conf +psql_sysctl_kernel_sharedmem_parameters: + - { name: 'kernel.shmmax', value: '33554432' } + - { name: 'kernel.shmall', value: '2097152' } + +postgresql_pkgs: + - 'postgresql-{{ psql_version }}' + - 'postgresql-contrib-{{ psql_version }}' + - 'postgresql-client-{{ psql_version }}' + - pgtop + + +psql_db_name: db_name +psql_db_user: db_user +psql_db_pwd: "We cannot save the password into the repository. Use another variable and change pgpass.j2 accordingly. Encrypt the file that contains the variable with ansible-vault" + + + +postgres_install_gis_extensions: False +postgres_gis_version: 2.1 +postgres_gis_pkgs: + - 'postgresql-{{ psql_version }}-postgis-{{ postgres_gis_version }}' + +pg_backup_logdir: /var/log/postgresql +pg_backup_bin: /usr/local/sbin/postgresql-backup +pg_backup_pgdump_bin: /usr/bin/pg_dump +pg_backup_retain_copies: 15 +pg_backup_build_db_list: "no" +pg_backup_db_list: '{{ psql_db_name }}' +pg_backup_destdir: /var/lib/pgsql/backups +# pg_backup_logdir is defined once above; a second definition would be a duplicate YAML key (silent last-wins) +pg_backup_logfile: '{{ pg_backup_logdir }}/postgresql-backup.log' +pg_backup_use_auth: "yes" +pg_backup_pass_file: /root/.pgpass +pg_backup_use_nagios: "yes" + +#psql_db_data: + # Example of line needed to create a db, create the user that owns the db, manage the db accesses (used by iptables too). All the fields are mandatory.
+ #- { name: '{{ psql_db_name }}', encoding: 'UTF8', user: '{{ psql_db_user }}', pwd: '{{ psql_db_pwd }}', roles: 'NOCREATEDB,NOSUPERUSER', allowed_hosts: [ 'xxx.xxx.xxx.xxx/32', 'yyy.yyy.yyy.yyy/32' ] } + # Example of line needed to manage the db accesses (used by iptables too), without creating the db and the user. Useful, for example, to give someone access to the postgresql db + #- { name: '{{ psql_db_name }}', user: '{{ psql_db_user }}', allowed_hosts: [ 'xxx.xxx.xxx.xxx/32', 'yyy.yyy.yyy.yyy/32' ] } + diff --git a/postgresql/files/postgresql-backup.cron b/postgresql/files/postgresql-backup.cron new file mode 100755 index 00000000..3acf1c07 --- /dev/null +++ b/postgresql/files/postgresql-backup.cron @@ -0,0 +1,36 @@ +#!/bin/bash + +if [ -f /etc/default/pg_backup ] ; then + . /etc/default/pg_backup +else + PG_SERVICE=postgresql + USE_NAGIOS=no + LOG_DIR=/var/log/postgresql + LOG_FILE=$LOG_DIR/postgresql-backup.log + PG_BCK_BIN=/usr/local/sbin/postgresql-backup +fi + +export PATH="/sbin:/usr/sbin:/usr/local/sbin:$PATH" +PG_SVC=$( service $PG_SERVICE status >/dev/null ) +PG_RUNNING=$? + +if [ ! -d $LOG_DIR ] ; then + mkdir -p $LOG_DIR +fi + +if [ "$PG_RUNNING" -ne "0" -a "$PG_RUNNING" -ne "3" ] ; then + echo "postgresql not running" > $LOG_FILE + exit 1 +else + $PG_BCK_BIN > $LOG_FILE 2>&1 +fi +if [ "${USE_NAGIOS}" == "yes" ] ; then + N_LOGDIR=/var/log/nagios-checks + if [ ! -d $N_LOGDIR ] ; then + mkdir -p $N_LOGDIR + fi +fi + +exit 0 + + diff --git a/postgresql/files/postgresql-backup.sh b/postgresql/files/postgresql-backup.sh new file mode 100755 index 00000000..43da19eb --- /dev/null +++ b/postgresql/files/postgresql-backup.sh @@ -0,0 +1,100 @@ +#!/bin/bash + +if [ -f /etc/default/pg_backup ] ; then + . 
/etc/default/pg_backup +else + N_DAYS_TO_SPARE=7 + USE_NAGIOS=no + BUILD_DBLIST=yes + PG_USE_AUTH=yes + PG_PASS_FILE=/root/.pgpass + BACKUPDIR=/var/lib/pgsql/backups + DB_LIST="" ; PG_DUMP_BIN=/usr/bin/pg_dump # fallback: PG_DUMP_BIN is used below and must be set when the defaults file is missing +fi + +# Year month day - hour minute second +SAVE_TIME=$( date +%Y%m%d-%H%M%S ) +TIMESTAMP= +RETVAL=0 +#export LANG=C +HISTDIR=$BACKUPDIR/history +TIMESTAMP_LOG=$BACKUPDIR/.timestamp +# If nagios is active, save the report status for each backup +# Nagios return values: 0 = OK, 1 = WARNING, 2 = CRITICAL, 3 = UNKNOWN +NAGIOS_LOG=$BACKUPDIR/.nagios-status + +if [ ! -d ${BACKUPDIR} ] ; then + mkdir -p ${BACKUPDIR} +fi +if [ ! -d ${HISTDIR} ] ; then + mkdir -p ${HISTDIR} +fi +LOCKFILE=${BACKUPDIR}/.dumplock + +umask 0077 +if [ "$BUILD_DBLIST" == "yes" ] ; then +# The psql -l command prints too much stuff + DB_LIST=$( psql -q -t -l -U postgres | grep -v template0 | grep -v template1 | grep -v : | grep -v ^\( | grep -v ^\- | awk '{print $1}' ) +fi + +if [ ! -f $LOCKFILE ] ; then + touch $LOCKFILE + if [ "$USE_NAGIOS" == "yes" ] ; then + > $NAGIOS_LOG + fi + if [ "${PG_USE_AUTH}" == "yes" -a ! -f $PG_PASS_FILE ] ; then + if [ "$USE_NAGIOS" == "yes" ] ; then + echo ".pgpass file not found, but authentication needed. All dbs: FAILED" >> $NAGIOS_LOG + fi + RETVAL=2 + else + # Backup of the db system data + # pg_dumpall -U postgres -g > ${HISTDIR}/pgsql-global.data.$SAVE_TIME + # Dump all the databases + for db in $DB_LIST ; do + if [ "${PG_USE_AUTH}" == "yes" ] ; then + DB_IN_AUTFILE=$( grep :${db}: $PG_PASS_FILE ) + DB_IN_AUTFILE_RETVAL=$? + if [ $DB_IN_AUTFILE_RETVAL -eq 0 ] ; then + PG_HOST=$( grep :${db}: $PG_PASS_FILE | cut -d : -f 1 ) + PG_PORT=$( grep :${db}: $PG_PASS_FILE | cut -d : -f 2 ) + PG_USER=$( grep :${db}: $PG_PASS_FILE | cut -d : -f 4 ) + ${PG_DUMP_BIN} -Fc -h $PG_HOST -p $PG_PORT -U $PG_USER $db > ${HISTDIR}/$db.data.$SAVE_TIME + DUMP_RESULT=$? + else + DUMP_RESULT=2 + fi + else + ${PG_DUMP_BIN} -Fc -U postgres $db > ${HISTDIR}/$db.data.$SAVE_TIME + DUMP_RESULT=$?
+ fi + + if [ "$USE_NAGIOS" == "yes" ] ; then + if [ $DUMP_RESULT -ne 0 ] ; then + echo "$db:FAILED" >> $NAGIOS_LOG + RETVAL=$DUMP_RESULT + else + echo "$db:OK" >> $NAGIOS_LOG + fi + fi + pushd ${BACKUPDIR}/ >/dev/null 2>&1 + rm -f $db.data + ln -s ${HISTDIR}/$db.data.$SAVE_TIME ./$db.data + popd >/dev/null 2>&1 + done + fi + TIMESTAMP=$( date +%s ) + echo "$TIMESTAMP" > $TIMESTAMP_LOG + rm -f $LOCKFILE +else + RETVAL=2 + if [ "$USE_NAGIOS" == "yes" ] ; then + echo "old backup still running:WARNING" >> $NAGIOS_LOG + fi +fi + + +# Remove the old backups +find ${HISTDIR} -ctime +$N_DAYS_TO_SPARE -exec rm -f {} \; + +exit $RETVAL diff --git a/postgresql/handlers/main.yml b/postgresql/handlers/main.yml new file mode 100644 index 00000000..a82fa733 --- /dev/null +++ b/postgresql/handlers/main.yml @@ -0,0 +1,6 @@ +--- +- name: Restart postgresql + service: name=postgresql state=restarted + +- name: Reload postgresql + service: name=postgresql state=reloaded diff --git a/postgresql/tasks/configure-access.yml b/postgresql/tasks/configure-access.yml new file mode 100644 index 00000000..903dac2b --- /dev/null +++ b/postgresql/tasks/configure-access.yml @@ -0,0 +1,87 @@ +--- +# +# To give postgresql access to remote clients you need to define something like that: +# +# psql_db_data: +# - { name: 'db_name', user: 'db_user', pwd: 'db_pwd', allowed_hosts: [ '146.48.123.17/32', '146.48.122.110/32' ] } +# +- name: Give access to the remote postgresql client + lineinfile: name=/etc/postgresql/{{ psql_version }}/main/pg_hba.conf regexp="^host {{ item.0.name }} {{ item.0.user }} {{ item.1 }}.*$" line="host {{ item.0.name }} {{ item.0.user }} {{ item.1 }} md5" + with_subelements: + - psql_db_data + - allowed_hosts + when: + - psql_listen_on_ext_int + - psql_db_data is defined + - item.1 is defined + notify: Reload postgresql + tags: + - postgresql + - postgres + - pg_hba + +- name: We want postgres listen on the public IP + lineinfile: name=/etc/postgresql/{{ psql_version 
}}/main/postgresql.conf regexp="^listen_addresses.*$" line="listen_addresses = '*'" + notify: Restart postgresql + when: + - psql_listen_on_ext_int + - psql_db_data is defined + tags: + - postgresql + - postgres + - pg_conf + +- name: If postgresql is only accessed from localhost make it listen only on the localhost interface + lineinfile: name=/etc/postgresql/{{ psql_version }}/main/postgresql.conf regexp="^listen_addresses.*$" line="listen_addresses = 'localhost'" + notify: Restart postgresql + when: + - not psql_listen_on_ext_int + - psql_db_data is defined + tags: + - postgresql + - postgres + - pg_conf + +- name: Log the connections + lineinfile: name=/etc/postgresql/{{ psql_version }}/main/postgresql.conf regexp="^log_connections.*$" line="log_connections = on" + notify: Restart postgresql + when: + - psql_listen_on_ext_int + - psql_db_data is defined + tags: + - postgresql + - postgres + - pg_conf + +- name: Log the disconnections + lineinfile: name=/etc/postgresql/{{ psql_version }}/main/postgresql.conf regexp="^log_disconnections.*$" line="log_disconnections = on" + notify: Restart postgresql + when: + - psql_listen_on_ext_int + - psql_db_data is defined + tags: + - postgresql + - postgres + - pg_conf + +- name: Log the hostnames + lineinfile: name=/etc/postgresql/{{ psql_version }}/main/postgresql.conf regexp="^log_hostname.*$" line="log_hostname = on" + notify: Restart postgresql + when: + - psql_listen_on_ext_int + - psql_db_data is defined + tags: + - postgresql + - postgres + - pg_conf + +- name: Set the correct permissions to the postgresql files + file: dest=/etc/postgresql/{{ psql_version }}/main/{{ item }} owner=root group=postgres mode=0640 + with_items: + - pg_hba.conf + - postgresql.conf + tags: + - postgresql + - postgres + - pg_hba + diff --git a/postgresql/tasks/main.yml b/postgresql/tasks/main.yml new file mode 100644 index 00000000..4b5c71d7 --- /dev/null +++ b/postgresql/tasks/main.yml @@ -0,0 +1,16 @@ +--- +- include: 
postgresql_org_repo.yml + when: pg_use_postgresql_org_repo +- include: packages.yml +- include: postgis.yml + when: postgres_install_gis_extensions +- include: postgresql-config.yml +- include: psql-kernel-sharedmem.yml + when: psql_set_shared_memory +- include: configure-access.yml + when: psql_db_data is defined +- include: manage_pg_db.yml + when: psql_db_data is defined +- include: postgresql-backup.yml + when: psql_db_data is defined + diff --git a/postgresql/tasks/manage_pg_db.yml b/postgresql/tasks/manage_pg_db.yml new file mode 100644 index 00000000..da92d7a5 --- /dev/null +++ b/postgresql/tasks/manage_pg_db.yml @@ -0,0 +1,29 @@ +--- +- name: Add a user for the postgresql DBs + remote_user: root + sudo: yes + sudo_user: postgres + postgresql_user: user={{ item.user }} password={{ item.pwd }} role_attr_flags={{ item.roles }} + with_items: psql_db_data + when: + - psql_db_data is defined + - item.roles is defined + tags: + - postgresql + - postgres + - pg_db + +- name: Add the databases with the correct owner + remote_user: root + sudo: yes + sudo_user: postgres + postgresql_db: db={{ item.name }} encoding={{ item.encoding }} owner={{ item.user }} template=template0 state=present + with_items: psql_db_data + when: + - psql_db_data is defined + - item.roles is defined + tags: + - postgresql + - postgres + - pg_db + diff --git a/postgresql/tasks/packages.yml b/postgresql/tasks/packages.yml new file mode 100644 index 00000000..a49826d8 --- /dev/null +++ b/postgresql/tasks/packages.yml @@ -0,0 +1,24 @@ +--- +- name: install the postgresql packages + apt: pkg={{ item }} state=installed + with_items: postgresql_pkgs + notify: + Restart postgresql + tags: + - postgresql + - postgres + +- name: Install the packages that ansible needs to manage the postgresql users and databases + apt: pkg={{ item }} state=installed + with_items: + - python-psycopg2 + tags: + - postgresql + - postgres + +- name: Ensure that the postgresql server is started + service: name=postgresql 
state=started enabled=yes + tags: + - postgresql + - postgres + diff --git a/postgresql/tasks/postgis.yml b/postgresql/tasks/postgis.yml new file mode 100644 index 00000000..9ac2bd78 --- /dev/null +++ b/postgresql/tasks/postgis.yml @@ -0,0 +1,10 @@ +--- +- name: install the postgresql GIS packages + apt: pkg={{ item }} state=installed + with_items: postgres_gis_pkgs + notify: + Restart postgresql + tags: + - postgresql + - postgres + diff --git a/postgresql/tasks/postgresql-backup.yml b/postgresql/tasks/postgresql-backup.yml new file mode 100644 index 00000000..8a20264a --- /dev/null +++ b/postgresql/tasks/postgresql-backup.yml @@ -0,0 +1,35 @@ +--- +- name: Backup script for the postgresql database(s) + copy: src=postgresql-backup.sh dest=/usr/local/sbin/postgresql-backup owner=root mode=0744 + tags: + - pg_backup + - postgresql + - postgres + +- name: cron job for the postgresql database(s) backup + copy: src=postgresql-backup.cron dest=/etc/cron.daily/postgresql-backup owner=root mode=0744 + tags: + - pg_backup + - postgresql + - postgres + +- name: postgresql backup defaults + template: src=pg_backup-default.j2 dest=/etc/default/pg_backup owner=root mode=0744 + tags: + - pg_backup + - postgresql + - postgres + +- name: authorization file for the database backup + template: src=pgpass.j2 dest={{ pg_backup_pass_file }} owner=root mode=0600 + tags: + - pg_backup + - postgresql + - postgres + +- name: Create the postgresql backups data directory + file: dest={{ pg_backup_destdir }} owner=postgres group=postgres mode=700 recurse=yes state=directory + tags: + - pg_backup + - postgresql + - postgres diff --git a/postgresql/tasks/postgresql-config.yml b/postgresql/tasks/postgresql-config.yml new file mode 100644 index 00000000..3cc8c734 --- /dev/null +++ b/postgresql/tasks/postgresql-config.yml @@ -0,0 +1,28 @@ +--- +- name: Create the postgresql data directory if it is not in the default place + file: dest={{ psql_data_dir }} owner=postgres group=postgres mode=700 
recurse=yes state=directory + when: psql_use_alternate_data_dir + tags: + - postgresql + - postgres + - pg_conf + +- name: Set the postgresql data dir if it is different from the default + action: configfile path=/etc/postgresql/{{ psql_version }}/main/postgresql.conf key=data_directory value="'{{ psql_data_dir }}'" + notify: Restart postgresql + when: psql_use_alternate_data_dir + tags: + - postgresql + - postgres + - pg_conf + +- name: Set some postgresql configuration parameters + action: configfile path=/etc/postgresql/{{ psql_version }}/main/postgresql.conf key={{ item.name }} value="{{ item.value }}" + with_items: psql_conf_parameters + when: item.set == 'True' + notify: Restart postgresql + tags: + - postgresql + - postgres + - pg_conf + diff --git a/postgresql/tasks/postgresql_org_repo.yml b/postgresql/tasks/postgresql_org_repo.yml new file mode 100644 index 00000000..3d44f1eb --- /dev/null +++ b/postgresql/tasks/postgresql_org_repo.yml @@ -0,0 +1,15 @@ +--- +- name: Get the signing key for the postgresql.org repository + apt_key: url=https://www.postgresql.org/media/keys/ACCC4CF8.asc state=present + when: pg_use_postgresql_org_repo + tags: + - postgresql + - postgresql_repo + +- name: Setup the postgresql.org repository + apt_repository: repo='deb http://apt.postgresql.org/pub/repos/apt/ {{ ansible_lsb.codename }}-pgdg main' + when: pg_use_postgresql_org_repo + tags: + - postgresql + - postgresql_repo + diff --git a/postgresql/tasks/psql-kernel-sharedmem.yml b/postgresql/tasks/psql-kernel-sharedmem.yml new file mode 100644 index 00000000..6a478d94 --- /dev/null +++ b/postgresql/tasks/psql-kernel-sharedmem.yml @@ -0,0 +1,8 @@ +--- +- name: Configure the kernel shared memory to please postgresql + sysctl: name={{ item.name }} value={{ item.value }} sysctl_file=/etc/sysctl.d/{{ psql_sysctl_file }} reload=yes state=present + with_items: psql_sysctl_kernel_sharedmem_parameters + when: psql_set_shared_memory + tags: + - sysctl + - postgresql diff --git 
a/postgresql/templates/pg_backup-default.j2 b/postgresql/templates/pg_backup-default.j2 new file mode 100644 index 00000000..68582245 --- /dev/null +++ b/postgresql/templates/pg_backup-default.j2 @@ -0,0 +1,13 @@ +PG_SERVICE=postgresql +PG_VERSION={{ psql_version }} +PG_DUMP_BIN={{ pg_backup_pgdump_bin }} +PG_BCK_BIN={{ pg_backup_bin }} +USE_NAGIOS={{ pg_backup_use_nagios }} +LOG_DIR={{ pg_backup_logdir }} +LOG_FILE={{ pg_backup_logfile}} +N_DAYS_TO_SPARE={{ pg_backup_retain_copies }} +BUILD_DBLIST={{ pg_backup_build_db_list }} +DB_LIST={{ pg_backup_db_list }} +PG_USE_AUTH={{ pg_backup_use_auth }} +PG_PASS_FILE={{ pg_backup_pass_file }} +BACKUPDIR={{ pg_backup_destdir }} diff --git a/postgresql/templates/pgpass.j2 b/postgresql/templates/pgpass.j2 new file mode 100644 index 00000000..1586807a --- /dev/null +++ b/postgresql/templates/pgpass.j2 @@ -0,0 +1,8 @@ +# Loop psql_db_data to add multiple databases +{% if psql_db_data is defined %} +{% for db in psql_db_data %} +{%if db.pwd is defined %} +{{ psql_db_host }}:{{ psql_db_port }}:{{ db.name }}:{{ db.user }}:{{ db.pwd }} +{% endif %} +{% endfor %} +{% endif %} diff --git a/redmine/README b/redmine/README new file mode 100644 index 00000000..dc57d62e --- /dev/null +++ b/redmine/README @@ -0,0 +1,18 @@ + +Best practices: + +- Use a boolean variable for each plugin to decide if it has to be installed or removed. + +- Provide a task to remove a plugin + + + +To delete a plugin: + +1. Downgrade the database + + cd /srv/redmine/d4science; bundle exec rake redmine:plugins:migrate NAME=plugin_name VERSION=0 RAILS_ENV=production + +2. Remove the plugin from the plugins folder (/srv/redmine/d4science/plugins/plugin_name) + +3. Restart unicorn (or web server) diff --git a/redmine/defaults/main.yml b/redmine/defaults/main.yml new file mode 100644 index 00000000..9059c34a --- /dev/null +++ b/redmine/defaults/main.yml @@ -0,0 +1,105 @@ +--- +# +# Note: this plugin depends on apache. 
It's not usable with nginx yet +# +redmine_version: 2.5.2 +redmine_inst_name: nemis +redmine_main_project: nemis +redmine_db_user: redm_nemis +redmine_db_name: redm_nemis +redmine_db_host: localhost +# The commented variables need to be set on the playbooks +#redmine_glob_root_dir: /srv/redmine +# It is a redmine_glob_root_dir subdirectory +redmine_inst_dir: nemis +redmine_user: nemis +redmine_group: nemis + +redmine_glob_user: www-data +redmine_glob_group: www-data +redmine_glob_users_home_base: /srv/redmine-home + +# Plugins +redmine_install_agile_plugin: False +rm_scrum_plugin: False +rm_advanced_roadmap_plugin: False +rm_scrum2b_plugin: False +rm_autowatcher_plugin: False +rm_issuereminder_plugin: False +rm_mention_plugin: False +rm_defaultcustomquery_plugin: False +rm_gamification_plugin: False +rm_closesresolvedissue_plugin: False +rm_defaultassign_plugin: False +rm_onceassignedeverwatcher_plugin: False +rm_clipboardimagepaste_plugin: False +rm_hotkeysjs_plugin: False +rm_issuessorting_plugin: False +rm_mylyn_plugin: False +rm_quickedit_plugin: False +rm_quickview_plugin: False +rm_codereview_plugin: False +rm_globalroles_plugin: False +rm_unreadissues_plugin: False +rm_usability_plugin: False +rm_mylynconnector_plugin: False +rm_addsubversionlinks_plugin: False +# +# IMPORTANT: these are mutually exclusive. 
One of the two needs to be set to True +ruby_use_mod_passenger: True +ruby_use_unicorn: False + +# Used by unicorn +unicorn_listen_port: 4000 +unicorn_listen_address: 127.0.0.1 +unicorn_worker_processes: 5 +unicorn_timeout: 120 +unicorn_log_dir: /var/log/unicorn +unicorn_pid_file: /run/unicorn/unicorn.pid +unicorn_apache_modules: + - proxy_balancer + - proxy + - proxy_http + - lbmethod_byrequests + - lbmethod_bytraffic + - lbmethod_bybusyness + - lbmethod_heartbeat + +# For Ubuntu trusty +redmine_base_packages: + - subversion + - git-core + - curl + - ruby + - rails + - ruby-rmagick + - rails + - zlib1g-dev + - libpq-dev + - libmysqld-dev + - libmagickwand-dev + - libmagickcore-dev + - imagemagick + +# For Ubuntu precise +redmine_base_packages_old_gems: + - subversion + - git-core + # Needed to compile the apache passenger module + - curl + - libcurl4-openssl-dev + - apache2-prefork-dev + - libmagickwand-dev + - librmagick-ruby + - ruby1.9.3 + - rubygems + - rails + +redmine_base_apache_modules: + - ssl + - rewrite + - expires + +redmine_additional_gems: + - pry + diff --git a/redmine/files/a1-theme.zip b/redmine/files/a1-theme.zip new file mode 100644 index 00000000..104fb529 Binary files /dev/null and b/redmine/files/a1-theme.zip differ diff --git a/redmine/files/circle-theme.zip b/redmine/files/circle-theme.zip new file mode 100644 index 00000000..bdb8e4b9 Binary files /dev/null and b/redmine/files/circle-theme.zip differ diff --git a/redmine/files/global_roles.zip b/redmine/files/global_roles.zip new file mode 100644 index 00000000..1bee29d8 Binary files /dev/null and b/redmine/files/global_roles.zip differ diff --git a/redmine/files/mod-passenger.load b/redmine/files/mod-passenger.load new file mode 100644 index 00000000..502a02e5 --- /dev/null +++ b/redmine/files/mod-passenger.load @@ -0,0 +1,3 @@ +LoadModule passenger_module /var/lib/gems/1.9.1/gems/passenger-4.0.21/buildout/apache2/mod_passenger.so +PassengerRoot /var/lib/gems/1.9.1/gems/passenger-4.0.21 
+PassengerDefaultRuby /usr/bin/ruby1.9.1 diff --git a/redmine/files/redmine-configuration.yml b/redmine/files/redmine-configuration.yml new file mode 100644 index 00000000..e1dffeb6 --- /dev/null +++ b/redmine/files/redmine-configuration.yml @@ -0,0 +1,41 @@ +# specific configuration options for production environment +# that overrides the default ones +# production: +# email_delivery: +# delivery_method: :async_smtp +# async_smtp_settings: +# address: 127.0.0.1 +# domain: 'research-infrastructures.eu' +# port: 25 +# enable_starttls_auto: false + +production: + email_delivery: + delivery_method: :async_sendmail + +# Key used to encrypt sensitive data in the database (SCM and LDAP passwords). +# If you don't want to enable data encryption, just leave it blank. +# WARNING: losing/changing this key will make encrypted data unreadable. +# +# If you want to encrypt existing passwords in your database: +# * set the cipher key here in your configuration file +# * encrypt data using 'rake db:encrypt RAILS_ENV=production' +# +# If you have encrypted data and want to change this key, you have to: +# * decrypt data using 'rake db:decrypt RAILS_ENV=production' first +# * change the cipher key here in your configuration file +# * encrypt data using 'rake db:encrypt RAILS_ENV=production' +# database_cipher_key: + +# Your secret key for verifying cookie session data integrity. If you +# change this key, all old sessions will become invalid! Make sure the +# secret is at least 30 characters and all random, no regular words or +# you'll be exposed to dictionary attacks. +# +# If you have a load-balancing Redmine cluster, you have to use the +# same secret token on each machine. 
+#secret_token: 'change it to a long random string' + +# specific configuration options for development environment +# that overrides the default ones +development: diff --git a/redmine/files/redmine.init b/redmine/files/redmine.init new file mode 100644 index 00000000..7e5f4841 --- /dev/null +++ b/redmine/files/redmine.init @@ -0,0 +1,58 @@ +#! /bin/bash + +### BEGIN INIT INFO +# Provides: unicorn +# Required-Start: $local_fs $remote_fs $network $syslog +# Required-Stop: $local_fs $remote_fs $network $syslog +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: starts the unicorn web server +# Description: starts unicorn +### END INIT INFO +APP= +USER= +DAEMON=unicorn +DAEMON_OPTS="-c $APP/config/unicorn.rb -E production -D" +NAME=unicorn +DESC="Unicorn app for $USER" +PID=/run/unicorn/unicorn.pid + +if [ -f /etc/default/unicorn-redmine ] ; then + . /etc/default/unicorn-redmine +fi + +case "$1" in + start) + CD_TO_APP_DIR="cd $APP" + START_DAEMON_PROCESS="bundle exec $DAEMON $DAEMON_OPTS" + + echo -n "Starting $DESC: " + if [ `whoami` = root ]; then + su - $USER -c "$CD_TO_APP_DIR > /dev/null 2>&1 && $START_DAEMON_PROCESS" + else + $CD_TO_APP_DIR > /dev/null 2>&1 && $START_DAEMON_PROCESS + fi + echo "$NAME." + ;; + stop) + echo -n "Stopping $DESC: " + kill -QUIT `cat $PID` + echo "$NAME." + ;; + restart) + echo -n "Restarting $DESC: " + kill -USR2 `cat $PID` + echo "$NAME." + ;; + reload) + echo -n "Reloading $DESC configuration: " + kill -HUP `cat $PID` + echo "$NAME." 
+ ;; + *) + echo "Usage: $NAME {start|stop|restart|reload}" >&2 + exit 1 + ;; +esac + +exit 0 diff --git a/redmine/files/redmine_agile.zip b/redmine/files/redmine_agile.zip new file mode 100644 index 00000000..b55af892 Binary files /dev/null and b/redmine/files/redmine_agile.zip differ diff --git a/redmine/files/unread_issues.zip b/redmine/files/unread_issues.zip new file mode 100644 index 00000000..5099ea00 Binary files /dev/null and b/redmine/files/unread_issues.zip differ diff --git a/redmine/files/usability.zip b/redmine/files/usability.zip new file mode 100644 index 00000000..7507c4e5 Binary files /dev/null and b/redmine/files/usability.zip differ diff --git a/redmine/handlers/main.yml b/redmine/handlers/main.yml new file mode 100644 index 00000000..88e02a0d --- /dev/null +++ b/redmine/handlers/main.yml @@ -0,0 +1,66 @@ +--- +- name: apache2 reload + service: name=apache2 state=reloaded + +- name: apache2 reload when needed + when: ruby_use_mod_passenger + service: name=apache2 state=reloaded + +# NB: we set 1.9.1, but if ruby1.9.3 is installed the real default is 1.9.3. Blame debian/ubuntu +- name: set ruby 1.9.3 as default +# shell: update-alternatives --set ruby /usr/bin/ruby1.9.1 + alternatives: name=ruby path=/usr/bin/ruby1.9.1 + when: is_precise + ignore_errors: True + +# NB: we set 1.9.1, but if gem1.9.3 is installed the real default is 1.9.3. 
Blame debian/ubuntu +- name: set gem 1.9.3 as default +# shell: update-alternatives --set gem /usr/bin/gem1.9.1 + alternatives: name=gem path=/usr/bin/gem1.9.1 + when: is_precise + ignore_errors: True + +- name: Reload unicorn + service: name=redmine state=reloaded + +- name: Reload unicorn when needed + service: name=redmine state=restarted + when: ruby_use_unicorn + +- name: Reconfigure redmine + shell: cd {{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}; bundle exec rake redmine:plugins:migrate RAILS_ENV=production + notify: + - apache2 reload when needed + - Reload unicorn when needed + +- name: Reconfigure agile plugin + shell: cd {{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}; bundle exec rake redmine:plugins NAME=redmine_agile RAILS_ENV=production + notify: + - apache2 reload when needed + - Reload unicorn when needed + +- name: Bundle install + shell: cd {{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}; bundle install --without development test mysql RAILS_ENV=production + notify: + - apache2 reload when needed + - Reload unicorn when needed + +- name: Bundle install and reconfigure redmine + shell: cd {{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}; bundle install ; bundle exec rake redmine:plugins:migrate RAILS_ENV=production + notify: + - apache2 reload when needed + - Reload unicorn when needed + +- name: Remove a plugin and reconfigure redmine + shell: cd {{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}; bundle exec rake redmine:plugins:migrate NAME=redmine_plugin_name VERSION=0 RAILS_ENV=production + notify: + - apache2 reload when needed + - Reload unicorn when needed + +- name: Generate the mod-passenger executable + shell: passenger-install-apache2-module -a + when: ruby_use_mod_passenger + +- name: change the redmine permissions recursively + shell: chown -R {{ redmine_user }}:{{ redmine_group }} {{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/files {{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/log {{ redmine_glob_root_dir 
}}/{{ redmine_inst_dir }}/tmp {{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/public/plugin_assets + ignore_errors: True diff --git a/redmine/meta/main.yml b/redmine/meta/main.yml new file mode 100644 index 00000000..05d86f55 --- /dev/null +++ b/redmine/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - role: '../../library/apache' diff --git a/redmine/tasks/base-config.yml b/redmine/tasks/base-config.yml new file mode 100644 index 00000000..78dc93c2 --- /dev/null +++ b/redmine/tasks/base-config.yml @@ -0,0 +1,15 @@ +--- +- name: Load the required apache modules + apache2_module: name={{ item }} state=present + with_items: redmine_base_apache_modules + notify: apache2 reload + tags: + - apache + - redmine + +- name: Ensure that the apache ssl directory exists + file: dest=/etc/apache2/ssl state=directory owner=root group=root mode=0750 + tags: + - apache + - redmine + diff --git a/redmine/tasks/base-packages-old-gems.yml b/redmine/tasks/base-packages-old-gems.yml new file mode 100644 index 00000000..72a1b280 --- /dev/null +++ b/redmine/tasks/base-packages-old-gems.yml @@ -0,0 +1,22 @@ +--- +- name: install the packages needed to run the redmine infrastructure. install the ruby packages needed to run redmine. 
Set ruby 1.9.3 as default + apt: pkg={{ item }} state=installed + with_items: redmine_base_packages_old_gems + notify: + - set ruby 1.9.3 as default + - set gem 1.9.3 as default + +# The standard mod-passenger doesn't work with ruby 1.9 +- name: install the packages needed to run the redmine infrastructure + apt: pkg={{ item }} state=absent + with_items: + - libapache2-mod-passenger + notify: apache2 reload + +- name: Remove the old passenger configuration + file: src=/etc/apache2/mods-available/{{ item }} dest=/etc/apache2/mods-enabled/{{ item }} state=absent + with_items: + - passenger.load + - passenger.conf + notify: apache2 reload + diff --git a/redmine/tasks/base-packages.yml b/redmine/tasks/base-packages.yml new file mode 100644 index 00000000..f3a2d3fa --- /dev/null +++ b/redmine/tasks/base-packages.yml @@ -0,0 +1,8 @@ +--- +- name: Install the packages needed to run the redmine infrastructure. install the ruby packages needed to run redmine. Use ruby 2.0 + apt: pkg={{ item }} state=installed + with_items: redmine_base_packages + tags: + - ruby + - redmine + diff --git a/redmine/tasks/main.yml b/redmine/tasks/main.yml new file mode 100644 index 00000000..5cf64b60 --- /dev/null +++ b/redmine/tasks/main.yml @@ -0,0 +1,14 @@ +--- +- include: base-packages-old-gems.yml + when: is_precise +- include: base-packages.yml + when: is_trusty +- include: rubygems.yml +- include: redmine.yml +- include: redmine-plugins.yml +- include: base-config.yml +- include: mod_passenger.yml + when: ruby_use_mod_passenger +- include: unicorn.yml + when: ruby_use_unicorn + diff --git a/redmine/tasks/mod_passenger.yml b/redmine/tasks/mod_passenger.yml new file mode 100644 index 00000000..4f35309a --- /dev/null +++ b/redmine/tasks/mod_passenger.yml @@ -0,0 +1,48 @@ +--- +- name: Install the phusion passenger repo key + apt_key: id=561F9B9CAC40B2F7 keyserver=keyserver.ubuntu.com state=present + register: update_apt_cache + when: ruby_use_mod_passenger + tags: + - ruby + - redmine + - 
passenger + +- name: Install the phusion passenger repo + apt_repository: repo='deb https://oss-binaries.phusionpassenger.com/apt/passenger {{ ansible_distribution_release }} main' state=present + register: update_apt_cache + when: ruby_use_mod_passenger + tags: + - ruby + - redmine + - passenger + +- name: Update the apt cache + apt: update_cache=yes + when: + - ruby_use_mod_passenger + - update_apt_cache.changed + ignore_errors: True + tags: + - ruby + - redmine + - passenger + +- name: Install the apache mod_passenger package + apt: pkg=libapache2-mod-passenger state=present + when: ruby_use_mod_passenger + notify: apache2 reload + tags: + - apache + - redmine + - passenger + +- name: Install the mod-passenger configuration + apache2_module: name=passenger state=present + when: ruby_use_mod_passenger + notify: apache2 reload + tags: + - apache + - redmine + - passenger + diff --git a/redmine/tasks/redmine-plugins.yml b/redmine/tasks/redmine-plugins.yml new file mode 100644 index 00000000..a87f6b1c --- /dev/null +++ b/redmine/tasks/redmine-plugins.yml @@ -0,0 +1,376 @@ +--- +# +# Plugins +# +- name: Redmine better gantt plugin + get_url: url=https://github.com/kulesa/redmine_better_gantt_chart/releases/download/v.0.9.0/redmine_better_gantt_chart_0.9.0.zip dest={{ redmine_glob_root_dir }}/plugins_download/redmine_better_gantt_chart_0.9.0.zip + tags: + - redmine + - redmine_plugins + +- name: Install the better gantt plugin + unarchive: src={{ redmine_glob_root_dir }}/plugins_download/redmine_better_gantt_chart_0.9.0.zip dest={{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/plugins copy=no owner=root group=root creates={{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/plugins/redmine_better_gantt_chart/init.rb + notify: + - apache2 reload when needed + - Reload unicorn when needed + tags: + - redmine + - redmine_plugins + +- name: Install the ldap sync plugin + git: repo=https://github.com/thorin/redmine_ldap_sync.git dest={{ redmine_glob_root_dir }}/{{ 
redmine_inst_dir }}/plugins/redmine_ldap_sync update=no + notify: + - Bundle install and reconfigure redmine + tags: + - redmine + - redmine_plugins + +- name: Redmine login audit plugin + git: repo=https://github.com/martin-denizet/redmine_login_audit.git dest={{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/plugins/redmine_login_audit update=no + notify: + - Reconfigure redmine + tags: + - redmine + - redmine_plugins + +- name: Redmine subversion links plugin + git: repo=https://github.com/masamitsu-murase/redmine_add_subversion_links.git dest={{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/plugins/redmine_add_subversion_links update=no + when: rm_addsubversionlinks_plugin + notify: + - apache2 reload when needed + - Reload unicorn when needed + tags: + - redmine + - redmine_plugins + +- name: Progressive projects list plugin + git: repo=https://github.com/stgeneral/redmine-progressive-projects-list.git dest={{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/plugins/progressive_projects_list update=no + notify: + - Reconfigure redmine + tags: + - redmine + - redmine_plugins + +- name: didyoumean plugin + git: repo=https://github.com/abahgat/redmine_didyoumean.git dest={{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/plugins/redmine_didyoumean update=no + notify: + - Reconfigure redmine + tags: + - redmine + - redmine_plugins + +- name: Install the graphs plugin + git: repo=https://github.com/bradbeattie/redmine-graphs-plugin.git dest={{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/plugins/redmine_graphs update=no + notify: + - Reconfigure redmine + tags: + - redmine + - redmine_plugins + +# This one causes problems on redmine 2.5.2 +# - name: Install the backlogs plugin +# git: repo=https://github.com/backlogs/redmine_backlogs.git dest={{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/plugins/redmine_backlogs update=no +# notify: +# - Bundle install and reconfigure redmine +# - apache2 reload +# tags: +# - redmine + +- name: Install the embedded tab 
plugin + git: repo=https://github.com/jamtur01/redmine_tab.git dest={{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/plugins/redmine_tab update=no + notify: + - apache2 reload when needed + - Reload unicorn when needed + tags: + - redmine + - redmine_plugins + +- name: Install the recurring-tasks plugin + git: repo=https://github.com/nutso/redmine-plugin-recurring-tasks.git dest={{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/plugins/recurring_tasks update=no + notify: + - Bundle install and reconfigure redmine + tags: + - redmine + - redmine_plugins + +- name: Redmine agile plugin. Free version from www.redminecrm.com + unarchive: src=redmine_agile.zip dest={{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/plugins creates={{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/plugins/redmine_agile/init.rb + when: redmine_install_agile_plugin + notify: + - Reconfigure agile plugin + - Bundle install + tags: + - redmine + - redmine_plugins + +- name: Get the scrum plugin + get_url: url=https://redmine.ociotec.com/attachments/download/302/scrum%20v0.9.1.tar.gz dest={{ redmine_glob_root_dir }}/plugins_download/scrum_v0.9.1.tar.gz validate_certs=no + when: rm_scrum_plugin + tags: + - redmine + - redmine_plugins + +- name: Unarchive the scrum plugin + unarchive: src={{ redmine_glob_root_dir }}/plugins_download/scrum_v0.9.1.tar.gz dest={{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/plugins copy=no owner=root group=root creates={{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/plugins/scrum/init.rb + when: rm_scrum_plugin + tags: + - redmine + - redmine_plugins + +- name: Rename the scrum plugin + shell: mv "{{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/plugins/scrum v0.9.1" {{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/plugins/scrum creates={{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/plugins/scrum/init.rb + when: rm_scrum_plugin + notify: + - Bundle install and reconfigure redmine + tags: + - redmine + - redmine_plugins + +- name: Install the 
redmine advanced roadmap plugin + git: repo=https://github.com/Coren/redmine_advanced_roadmap_v2.git dest={{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/plugins/advanced_roadmap_v2 update=no + when: rm_advanced_roadmap_plugin + notify: + - Bundle install and reconfigure redmine + tags: + - redmine + - redmine_plugins + - redmine_plugins_ar + +#- name: Get the scrum2b plugin +# get_url: url=https://github.com/scrum2b/scrum2b/archive/master.zip dest={{ redmine_glob_root_dir }}/plugins_download/scrum2b-2-1-stable.zip validate_certs=no +# when: rm_scrum2b_plugin +# tags: +# - redmine +# - redmine_plugins +# - redmine_plugins_scrum2b +# +#- name: Unarchive the scrum2b plugin +# unarchive: src={{ redmine_glob_root_dir }}/plugins_download/scrum2b-2-1-stable.zip dest={{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/plugins copy=no owner=root group=root creates={{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/plugins/scrum2b/init.rb +# when: rm_scrum2b_plugin +# tags: +# - redmine +# - redmine_plugins +# - redmine_plugins_scrum2b +# +#- name: Rename the scrum plugin +# shell: mv "{{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/plugins/scrum2b-master" {{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/plugins/scrum2b creates={{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/plugins/scrum2b/init.rb +# when: rm_scrum2b_plugin +# notify: +# - Bundle install and reconfigure redmine +# tags: +# - redmine +# - redmine_plugins +# - redmine_plugins_scrum2b + +- name: Install the redmine scrum2b plugin + git: repo=https://github.com/scrum2b/scrum2b version=2-1-stable dest={{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/plugins/scrum2b update=no + when: rm_scrum2b_plugin + notify: + - Bundle install and reconfigure redmine + tags: + - redmine + - redmine_plugins + - redmine_plugins_scrum2b + +- name: Install the auto watcher for groups plugin + git: repo=https://github.com/akuznecov/redmine_auto_watchers_from_groups dest={{ redmine_glob_root_dir }}/{{ redmine_inst_dir 
}}/plugins/redmine_auto_watchers_from_groups update=no + when: rm_autowatcher_plugin + notify: + - Bundle install and reconfigure redmine + tags: + - redmine + - redmine_plugins + - redmine_plugins_autowatcher + +- name: Install the issue reminder plugin + git: repo=https://github.com/Hopebaytech/redmine_issue_reminder version=redmine2.6 dest={{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/plugins/redmine_issue_reminder update=no + when: rm_issuereminder_plugin + notify: + - Bundle install and reconfigure redmine + tags: + - redmine + - redmine_plugins + - redmine_plugins_issuereminder + +- name: Install the mention plugin + git: repo=https://github.com/stpl/redmine_mention_plugin dest={{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/plugins/redmine_mention_plugin update=no + when: rm_mention_plugin + notify: + - Bundle install and reconfigure redmine + tags: + - redmine + - redmine_plugins + - redmine_plugins_mention + +- name: Install the default custom query plugin + git: repo=https://github.com/hidakatsuya/redmine_default_custom_query dest={{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/plugins/redmine_default_custom_query update=no + when: rm_defaultcustomquery_plugin + notify: + - Bundle install and reconfigure redmine + tags: + - redmine + - redmine_plugins + - redmine_plugins_defaultcustomquery + +- name: Install the gamification plugin + git: repo=https://github.com/mauricio-camayo/redmine_gamification_plugin dest={{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/plugins/redmine_gamification_plugin update=no + when: rm_gamification_plugin + notify: + - Bundle install and reconfigure redmine + tags: + - redmine + - redmine_plugins + - redmine_plugins_gamification + +- name: Install the closes resolved issues plugin + git: repo=https://github.com/Jogi1j/redmine_closes_resolved_issues dest={{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/plugins/redmine_closes_resolved_issues update=no + when: rm_closesresolvedissue_plugin + notify: + - Bundle 
install and reconfigure redmine + tags: + - redmine + - redmine_plugins + - redmine_plugins_closesresolvedissue + +- name: Install the default assign plugin + git: repo=https://github.com/giddie/redmine_default_assign dest={{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/plugins/redmine_default_assign update=no + when: rm_defaultassign_plugin + notify: + - Bundle install and reconfigure redmine + tags: + - redmine + - redmine_plugins + - redmine_plugins_defaultassign + +- name: Install the once assigned ever watcher plugin + git: repo=https://github.com/raafael911/redmine_once_assigned_ever_watcher dest={{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/plugins/redmine_once_assigned_ever_watcher update=no + when: rm_onceassignedeverwatcher_plugin + notify: + - Bundle install and reconfigure redmine + tags: + - redmine + - redmine_plugins + - redmine_plugins_onceassignedeverwatcher + +- name: Install the clipboard image paste plugin + git: repo=https://github.com/peclik/clipboard_image_paste dest={{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/plugins/clipboard_image_paste update=no + when: rm_clipboardimagepaste_plugin + notify: + - Bundle install and reconfigure redmine + tags: + - redmine + - redmine_plugins + - redmine_plugins_clipboardimagepaste + +- name: Install the hotkeys js plugin + git: repo=https://github.com/sasha-ch/redmine_hotkeys_js version=v0.0.3 dest={{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/plugins/clipboard_hotkeys_js update=no + when: rm_hotkeysjs_plugin + notify: + - Bundle install and reconfigure redmine + tags: + - redmine + - redmine_plugins + - redmine_plugins_hotkeysjs + +- name: Install the issues sorting plugin + git: repo=https://github.com/JohnBat26/redmine_issues_sorting dest={{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/plugins/redmine_issues_sorting update=no + when: rm_issuessorting_plugin + notify: + - Bundle install and reconfigure redmine + tags: + - redmine + - redmine_plugins + - 
redmine_plugins_issuessorting + +- name: Install the mylyn plugin + git: repo=https://github.com/ljader/redmine-mylyn-plugin dest={{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/plugins/redmine_mylyn_plugin update=no + when: rm_mylyn_plugin + notify: + - Bundle install and reconfigure redmine + tags: + - redmine + - redmine_plugins + - redmine_plugins_mylyn + +- name: Install the quick edit plugin + git: repo=git://git.sourceforge.jp/gitroot/quickedit/quick_edit.git accept_hostkey=yes dest={{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/plugins/quick_edit update=no + when: rm_quickedit_plugin + notify: + - Bundle install and reconfigure redmine + tags: + - redmine + - redmine_plugins + - redmine_plugins_quickedit + +- name: Install the quick view plugin + git: repo=git://git.sourceforge.jp/gitroot/quickedit/quick_view.git accept_hostkey=yes dest={{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/plugins/quick_view update=no + when: rm_quickview_plugin + notify: + - Bundle install and reconfigure redmine + tags: + - redmine + - redmine_plugins + - redmine_plugins_quickview + +- name: Get the code review plugin + get_url: url=https://bitbucket.org/haru_iida/redmine_code_review/downloads/redmine_code_review-0.6.5.zip dest={{ redmine_glob_root_dir }}/plugins_download/redmine_code_review-0.6.5.zip validate_certs=no + when: rm_codereview_plugin + tags: + - redmine + - redmine_plugins + - redmine_plugins_codereview + +- name: Unarchive the code review plugin + unarchive: src={{ redmine_glob_root_dir }}/plugins_download/redmine_code_review-0.6.5.zip dest={{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/plugins copy=no owner=root group=root creates={{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/plugins/redmine_code_review/init.rb + when: rm_codereview_plugin + notify: + - Bundle install and reconfigure redmine + tags: + - redmine + - redmine_plugins + - redmine_plugins_codereview + +- name: Unarchive the global roles plugin +# manual download required 
http://rmplus.pro/en/redmine/plugins/global_roles + unarchive: src={{ redmine_glob_root_dir }}/plugins_download/global_roles.zip dest={{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/plugins copy=no owner=root group=root creates={{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/plugins/global_roles/init.rb + when: rm_globalroles_plugin + notify: + - Bundle install and reconfigure redmine + tags: + - redmine + - redmine_plugins + - redmine_plugins_globalroles + +- name: Unarchive the unread issues plugin +# manual download required http://rmplus.pro/en/redmine/plugins/unread_issues + unarchive: src={{ redmine_glob_root_dir }}/plugins_download/unread_issues.zip dest={{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/plugins copy=no owner=root group=root creates={{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/plugins/unread_issues/init.rb + when: rm_unreadissues_plugin + notify: + - Bundle install and reconfigure redmine + tags: + - redmine + - redmine_plugins + - redmine_plugins_unreadissues + +- name: Unarchive the usability plugin +# manual download required http://rmplus.pro/en/redmine/plugins/usability + unarchive: src={{ redmine_glob_root_dir }}/plugins_download/usability.zip dest={{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/plugins copy=no owner=root group=root creates={{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/plugins/usability/init.rb + when: rm_usability_plugin + notify: + - Bundle install and reconfigure redmine + tags: + - redmine + - redmine_plugins + - redmine_plugins_usability + +- name: Install the mylyn connector plugin + git: repo=git://github.com/danmunn/redmine_mylyn_connector.git accept_hostkey=yes dest={{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/plugins/redmine_mylyn_connector update=no + when: rm_mylynconnector_plugin + notify: + - Bundle install and reconfigure redmine + tags: + - redmine + - redmine_plugins + - redmine_plugins_mylynconnector + + diff --git a/redmine/tasks/redmine.yml b/redmine/tasks/redmine.yml new 
file mode 100644 index 00000000..86bcd22f --- /dev/null +++ b/redmine/tasks/redmine.yml @@ -0,0 +1,154 @@ +--- +- name: Fail if the redmine data directory variable is not defined + fail: redmine_glob_root_dir is required for this role + when: redmine_glob_root_dir is not defined + tags: + - redmine + +- name: ensure that the redmine data directories exist + file: dest={{ item }} state=directory owner=root group=root + with_items: + - '{{ redmine_glob_root_dir }}' + - '{{ redmine_glob_users_home_base }}' + tags: + - redmine + +- name: Create the user that will run the redmine process + user: name={{ redmine_user }} createhome=true home={{ redmine_user_home }} shell=/bin/bash + tags: + - redmine + +- name: Ensure that the redmine user can write its $HOME/.subversion to store the svn site ssl certificate + file: dest={{ redmine_user_home }}/.subversion state=directory owner={{ redmine_user }} group={{ redmine_group }} + tags: + - redmine + +# We need to find a way to run svn and accept the certificate in non interactive mode as user www-data +# su - www-data svn list https://svn.driver.research-infrastructures.eu/driver +# NB: added "--trust-server-cert" to the svn options in lib/redmine/scm/adapters/subversion_adapter.rb +# don't know if it's useful +- name: Explicitly accept the svn ssl certificate + shell: /bin/true + tags: + - redmine + +# - name: Check if we have the svn.driver.research-infrastructures.eu public SSL certificate already +# shell: ls -l /etc/ssl/certs/svn.driver.research-infrastructures.eu.epm +# register: driver_ssl_cert +# ignore_errors: True + +# - name: Get the svn.driver.research-infrastructures.eu public SSL certificate +# command: openssl s_client -connect svn.driver.research-infrastructures.eu:443 -showcerts /dev/null|openssl x509 -outform PEM > /etc/ssl/certs/svn.driver.research-infrastructures.eu.epm +# when: "{{ driver_ssl_cert.rc }} != 0" + +- name: Get the redmine tarball + get_url: url=http://www.redmine.org/releases/redmine-{{ 
redmine_version }}.tar.gz dest={{ redmine_glob_root_dir }}/redmine-{{ redmine_version }}.tar.gz + tags: + - redmine + +- name: Explode the redmine archive + unarchive: src={{ redmine_glob_root_dir }}/redmine-{{ redmine_version }}.tar.gz dest={{ redmine_glob_root_dir }} copy=no owner=root group=root creates={{ redmine_glob_root_dir }}/redmine-{{ redmine_version }}/Rakefile + register: redmine_install + tags: + - redmine + +- name: Create the right path for the application. + file: src={{ redmine_glob_root_dir }}/redmine-{{ redmine_version }} dest={{ redmine_glob_root_dir }}/{{ redmine_inst_dir }} state=link + tags: + - redmine + +- name: Install the database configuration + template: src=redmine-database.yml.j2 dest={{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/config/database.yml owner=root group={{ redmine_group }} mode=0440 + tags: + - redmine + +- name: Install the configuration file. Needed to send email + copy: src=redmine-configuration.yml dest=/{{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/config/configuration.yml owner=root group={{ redmine_group }} mode=0440 + notify: + apache2 reload + tags: + - redmine + +- name: Install the gems required by redmine + shell: cd {{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}; bundle install --without development test sqlite mysql + when: redmine_install.changed + tags: + - redmine + +- name: Generate the secret token + shell: cd {{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}; rake generate_secret_token + when: redmine_install.changed + tags: + - redmine + +- name: Initialize the DB + shell: cd {{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}; RAILS_ENV=production rake db:migrate + when: redmine_install.changed + tags: + - redmine + +- name: Install the defauld DB data + shell: cd {{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}; RAILS_ENV=production REDMINE_LANG=en rake redmine:load_default_data + when: redmine_install.changed + tags: + - redmine + +- name: Install the packages needed by plugins or 
to build plugins required gems + apt: pkg={{ item }} state=installed + with_items: + - libxslt1-dev + tags: + - redmine + +# The themes come from http://www.redminecrm.com/ +- name: Install some optional themes + unarchive: src={{ item }}-theme.zip dest={{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/public/themes creates={{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/public/themes/{{ item }} + with_items: + - a1 + - circle + notify: + - apache2 reload when needed + - Reload unicorn when needed + tags: + - redmine + +- name: Cron jobs that manage recurring tasks + template: src={{ item }}.cron.j2 dest=/etc/cron.d/{{ item }} owner=root group=root mode=0444 + with_items: + - redmine-recurring-tasks + - redmine-ldap-sync + tags: + - redmine + +- name: Add unicorn to the redmine Gemfile + copy: dest={{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/Gemfile.local content='gem "unicorn"\n' owner={{ redmine_user }} group={{ redmine_group }} + when: ruby_use_unicorn + tags: + - redmine + - unicorn + +- name: Upgrade rake to fix all the gems mess + shell: cd {{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}; bundle update rake + tags: + - redmine + +- name: Fix the permission of some files + file: dest={{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/{{ item }} owner={{ redmine_user }} group={{ redmine_group }} + with_items: + - Gemfile + - Gemfile.lock + tags: + - redmine + +- name: Ensure that redmine can write into some directories + file: dest={{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/{{ item }} state=directory owner={{ redmine_user }} group={{ redmine_group }} + with_items: + - files + - log + - tmp + - public/plugin_assets + notify: + change the redmine permissions recursively + tags: + - redmine + diff --git a/redmine/tasks/rubygems.yml b/redmine/tasks/rubygems.yml new file mode 100644 index 00000000..b2741ab2 --- /dev/null +++ b/redmine/tasks/rubygems.yml @@ -0,0 +1,24 @@ +--- +- name: Install the gem bundler + gem: name=bundler state=latest + 
tags: + - ruby + - redmine + +- name: Install the gem packages needed by some external utilities + gem: name={{ item }} state=latest + with_items: redmine_additional_gems + tags: + - ruby + - redmine + +- name: Install the gem passenger. The apache mod_passenger module is too old. For ubuntu 12.04 + gem: name=passenger state=present + notify: Generate the mod-passenger executable + when: + - ruby_use_mod_passenger + - is_precise + tags: + - ruby + - redmine + diff --git a/redmine/tasks/unicorn.yml b/redmine/tasks/unicorn.yml new file mode 100644 index 00000000..295d5807 --- /dev/null +++ b/redmine/tasks/unicorn.yml @@ -0,0 +1,69 @@ +--- +- name: Install the unicorn ruby-on-rails service + gem: name={{ item }} state=latest + with_items: + - unicorn + when: ruby_use_unicorn + tags: + - ruby + - redmine + - unicorn + +- name: Create the unicorn log directory + file: dest={{ unicorn_log_dir }} state=directory owner={{ redmine_user }} group={{ redmine_user }} mode=0750 + tags: + - ruby + - redmine + - unicorn + +- name: Create the unicorn pid directory + file: dest=/var/run/unicorn state=directory owner={{ redmine_user }} group={{ redmine_user }} mode=0750 + tags: + - ruby + - redmine + - unicorn + +- name: Install the unicorn startup file for redmine + copy: src=redmine.init dest=/etc/init.d/redmine owner=root group=root mode=0755 + when: ruby_use_unicorn + tags: + - ruby + - redmine + - unicorn + +- name: Install the unicorn defaults file + template: src=unicorn-redmine.default.j2 dest=/etc/default/unicorn-redmine owner=root group=root mode=0644 + notify: Reload unicorn when needed + tags: + - ruby + - redmine + - unicorn + - unicorn_conf + +- name: Install the unicorn config + template: src=unicorn.conf.rb.j2 dest={{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/config/unicorn.conf.rb owner=root group=root mode=0644 + when: ruby_use_unicorn + notify: Reload unicorn when needed + tags: + - ruby + - redmine + - unicorn + - unicorn_conf + +- name: Install the needed 
apache modules + apache2_module: name={{ item }} state=present + with_items: unicorn_apache_modules + when: ruby_use_unicorn + notify: apache2 reload + tags: + - apache + - redmine + - unicorn + +- name: Ensure that the unicorn service is enabled and running + service: name=redmine state=started enabled=yes + when: ruby_use_unicorn + tags: + - ruby + - redmine + - unicorn diff --git a/redmine/templates/redmine-database.yml.j2 b/redmine/templates/redmine-database.yml.j2 new file mode 100644 index 00000000..c7a95024 --- /dev/null +++ b/redmine/templates/redmine-database.yml.j2 @@ -0,0 +1,8 @@ +production: + adapter: postgresql + database: {{ redmine_db_name }} + host: {{ redmine_db_host }} + username: {{ redmine_db_user }} + password: {{ redmine_db_pwd }} + encoding: utf8 +# schema_search_path: (default - public) diff --git a/redmine/templates/redmine-ldap-sync.cron.j2 b/redmine/templates/redmine-ldap-sync.cron.j2 new file mode 100644 index 00000000..1f43da7b --- /dev/null +++ b/redmine/templates/redmine-ldap-sync.cron.j2 @@ -0,0 +1 @@ +*/30 * * * * {{ redmine_user }} rake -f {{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/Rakefile --silent redmine:plugins:ldap_sync:sync_users RAILS_ENV=production >{{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/log/redmine-ldap-sync.log 2>&1 diff --git a/redmine/templates/redmine-recurring-tasks.cron.j2 b/redmine/templates/redmine-recurring-tasks.cron.j2 new file mode 100644 index 00000000..6604ed1e --- /dev/null +++ b/redmine/templates/redmine-recurring-tasks.cron.j2 @@ -0,0 +1,2 @@ +*/30 * * * * {{ redmine_user }} rake -f {{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/Rakefile --silent RAILS_ENV=production redmine:recur_tasks >{{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/log/redmine-recurring-tasks.log 2>&1 +#*/30 * * * * {{ redmine_user }} rake -f {{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}/Rakefile --silent RAILS_ENV=production redmine:check_periodictasks >{{ redmine_glob_root_dir }}/{{ redmine_inst_dir 
}}/log/redmine-periodic-tasks.log 2>&1 diff --git a/redmine/templates/unicorn-redmine.default.j2 b/redmine/templates/unicorn-redmine.default.j2 new file mode 100644 index 00000000..12ea38bb --- /dev/null +++ b/redmine/templates/unicorn-redmine.default.j2 @@ -0,0 +1,7 @@ +APP={{ redmine_glob_root_dir }}/{{ redmine_inst_dir }} +USER={{ redmine_user }} +DAEMON=unicorn +DAEMON_OPTS="-c $APP/config/unicorn.conf.rb -E production -D -l {{ unicorn_listen_address }}:3999" +NAME=unicorn +DESC="Unicorn app for $USER" +PID={{ unicorn_pid_file }} diff --git a/redmine/templates/unicorn.conf.rb.j2 b/redmine/templates/unicorn.conf.rb.j2 new file mode 100644 index 00000000..91a273ce --- /dev/null +++ b/redmine/templates/unicorn.conf.rb.j2 @@ -0,0 +1,60 @@ +app_dir = "{{ redmine_glob_root_dir }}/{{ redmine_inst_dir }}" +working_directory app_dir +pid "{{ unicorn_pid_file }}" + +preload_app true +timeout {{ unicorn_timeout }} +worker_processes {{ unicorn_worker_processes }} + +# Log files +stderr_path "{{ unicorn_log_dir }}/unicorn.stderr.log" +stdout_path "{{ unicorn_log_dir }}/unicorn.stdout.log" + +GC.respond_to?(:copy_on_write_friendly=) and GC.copy_on_write_friendly = true + +before_fork do |server, worker| + # the following is highly recomended for Rails + "preload_app true" + # as there's no need for the master process to hold a connection + defined?(ActiveRecord::Base) and ActiveRecord::Base.connection.disconnect! + + ## + # When sent a USR2, Unicorn will suffix its pidfile with .oldbin and + # immediately start loading up a new version of itself (loaded with a new + # version of our app). When this new Unicorn is completely loaded + # it will begin spawning workers. The first worker spawned will check to + # see if an .oldbin pidfile exists. If so, this means we've just booted up + # a new Unicorn and need to tell the old one that it can now die. To do so + # we send it a QUIT. + # + # Using this method we get 0 downtime deploys. 
+ + old_pid = "#{server.config[:pid]}.oldbin" + + if File.exists?(old_pid) && server.pid != old_pid + begin + sig = (worker.nr + 1) >= server.worker_processes ? :QUIT : :TTOU + Process.kill(sig, File.read(old_pid).to_i) + rescue Errno::ENOENT, Errno::ESRCH + # someone else did our job for us + end + end +end + +after_fork do |server, worker| + # Unicorn master loads the app then forks off workers - because of the way + # Unix forking works, we need to make sure we aren't using any of the parent's + # sockets, e.g. db connection + defined?(ActiveRecord::Base) and ActiveRecord::Base.establish_connection + # Redis and Memcached would go here but their connections are established + # on demand, so the master never opens a socket + #start the worker on port 4000, 4001, 4002 etc... + addr = "{{ unicorn_listen_address }}:#{ {{ unicorn_listen_port }} + worker.nr}" + # infinite tries to start the worker + server.listen(addr, :tries => 10, :delay => 5, :backlog => 128) + +end + + + + + diff --git a/solr-tomcat-instance/defaults/main.yml b/solr-tomcat-instance/defaults/main.yml new file mode 100644 index 00000000..0fb55504 --- /dev/null +++ b/solr-tomcat-instance/defaults/main.yml @@ -0,0 +1,34 @@ +--- +# solr +solr_http_port: 8983 +tomcat_http_port: '{{ solr_http_port }}' +tomcat_load_additional_default_conf: True +tomcat_version: 7 +# solr needs a lot of time to start if it needs to rebuild its indices +tomcat_restart_timeout: 100000 + +solr_version: 4.10.2 +solr_service: -solr +solr_config_name: hindex +solr_shards: 1 +solr_instance: '{{ solr_service }}' +solr_log_level: INFO +solr_http_port_1: '{{ tomcat_http_port }}' +solr_zoo_port: 9983 +solr_zoo_port_1: 9984 +solr_zoo_port_2: 9985 +solr_jmx_port_1: 8601 +solr_user: '{{ tomcat_user }}' +solr_group: '{{ tomcat_user }}' +solr_outside_tomcat_dir: False +# We need to define this one because we are using the tomcat multiple instances role +solr_tomcat_instance_dir: '{{ tomcat_m_instances_base_path }}/{{ solr_http_port }}' 
+solr_data_dir: '{{ solr_tomcat_instance_dir }}/solr' +solr_zookeeper_data_dir: '{{ solr_data_dir }}/zoo_data' +solr_install_collection1: False +# Stand alone +solr_opts: "-DzkRun -DnumShards={{ solr_shards }}" +# This is for the replica/sharded version +# We need to pass a lot of options to the jdk for zookeeper and the solr shard configuration +#solr_opts: "-DzkRun={{ ansible_fqdn}}:{{ solr_zoo_port }} -DnumShards={{ solr_shards }} -DzkHost=index1.t.hadoop.research-infrastructures.eu:{{ solr_zoo_port }},index2.t.hadoop.research-infrastructures.eu:{{ solr_zoo_port }},index3.t.hadoop.research-infrastructures.eu:{{ solr_zoo_port }} -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port={{ solr_jmx_port_1 }} -Dcom.sun.management.jmxremote.password.file={{ tomcat_conf_dir }}/jmxremote.passwd -Dcom.sun.management.jmxremote.access.file={{ tomcat_conf_dir }}/jmxremote.access" + diff --git a/solr-tomcat-instance/files/collection1.tar.gz b/solr-tomcat-instance/files/collection1.tar.gz new file mode 100644 index 00000000..80d4a3ea Binary files /dev/null and b/solr-tomcat-instance/files/collection1.tar.gz differ diff --git a/solr-tomcat-instance/files/solr-4.10.2.war b/solr-tomcat-instance/files/solr-4.10.2.war new file mode 100644 index 00000000..70e7a50a Binary files /dev/null and b/solr-tomcat-instance/files/solr-4.10.2.war differ diff --git a/solr-tomcat-instance/handlers/main.yml b/solr-tomcat-instance/handlers/main.yml new file mode 100644 index 00000000..15331111 --- /dev/null +++ b/solr-tomcat-instance/handlers/main.yml @@ -0,0 +1,4 @@ +--- +- name: tomcat solr restart + service: name=tomcat-instance-'{{ solr_http_port }}' state=restarted sleep=20 + diff --git a/solr-tomcat-instance/meta/main.yml b/solr-tomcat-instance/meta/main.yml new file mode 100644 index 00000000..6f3588ba --- /dev/null +++ b/solr-tomcat-instance/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - role: 
'../../library/tomcat-multiple-instances' diff --git a/solr-tomcat-instance/tasks/main.yml b/solr-tomcat-instance/tasks/main.yml new file mode 100644 index 00000000..7c160a31 --- /dev/null +++ b/solr-tomcat-instance/tasks/main.yml @@ -0,0 +1,90 @@ +--- +- name: Ensure that the solr data directory exists + file: path={{ solr_data_dir }}/{{ item }} state=directory mode=0775 owner={{ solr_user }} group={{ solr_user }} + with_items: + - data/solr + - webapps + - zoo_data + tags: + - solr + +- name: Create the link from the tomcat instance for solr to the solr data directory + file: src={{ solr_data_dir }}/data/solr dest={{ solr_tomcat_instance_dir }}/solr state=link + when: solr_outside_tomcat_dir + tags: + - solr + +- name: Solr needs some additional packages + apt: pkg={{ item }} state={{ pkg_state }} + with_items: + - libslf4j-java + - libcommons-logging-java + tags: + - solr + - tomcat + +- name: Let the additional packages jar files visible to tomcat + file: src=/usr/share/java/{{ item }} dest={{ tomcat_catalina_home_dir }}/lib/{{ item }} state=link + with_items: + - slf4j-api.jar + - slf4j-log4j12.jar + - jul-to-slf4j.jar + - jcl-over-slf4j.jar + - commons-logging.jar + notify: tomcat solr restart + tags: + - solr + - tomcat + +- name: Install the solr webapp under {{ solr_data_mountpoint }}/webapps + copy: src=solr-{{ solr_version }}.war dest={{ solr_data_dir }}/webapps/solr-{{ solr_version }}.war owner={{ solr_user }} group={{ solr_user }} mode=0644 + register: solr_war_installed + notify: tomcat solr restart + tags: + - solr + - tomcat + +- name: Install the solr catalina definition + template: src=catalina-{{ item }}.j2 dest={{ solr_tomcat_instance_dir }}/conf/Catalina/localhost/{{ item }} owner=root group=root mode=0444 + with_items: + - solr.xml + notify: tomcat solr restart + tags: + - solr + +- name: Install the solr collection1 example + unarchive: src=collection1.tar.gz dest={{ solr_data_dir }}/data/solr/ + args: + creates: '{{ solr_data_dir 
}}/data/solr/collection1' + when: solr_install_collection1 + notify: tomcat solr restart + tags: + - solr + - tomcat + +- name: Fix the collection1 permissions + file: path={{ solr_data_dir }}/data/solr/ owner={{ solr_user }} group={{ solr_user }} recurse=yes + when: solr_install_collection1 + tags: + - solr + - tomcat + +- name: Install the tomcat.local default file + template: src={{ item }}.j2 dest=/etc/default/tomcat-instance-{{ solr_http_port }}.local owner=root group={{ solr_user }} mode=0440 + with_items: + - tomcat.local + notify: tomcat solr restart + tags: + - solr + - tomcat + +- name: Install the solr.xml and zookeeper conf files + template: src={{ item }}.j2 dest={{ solr_data_dir }}/data/solr/{{ item }} owner=root group={{ solr_user }} mode=0440 + with_items: + - solr.xml + - zoo.cfg + notify: tomcat solr restart + tags: + - solr + - tomcat + diff --git a/solr-tomcat-instance/templates/catalina-solr.xml.j2 b/solr-tomcat-instance/templates/catalina-solr.xml.j2 new file mode 100644 index 00000000..f18188f0 --- /dev/null +++ b/solr-tomcat-instance/templates/catalina-solr.xml.j2 @@ -0,0 +1,4 @@ + + + + diff --git a/solr-tomcat-instance/templates/solr.xml.j2 b/solr-tomcat-instance/templates/solr.xml.j2 new file mode 100644 index 00000000..9f1a7025 --- /dev/null +++ b/solr-tomcat-instance/templates/solr.xml.j2 @@ -0,0 +1,44 @@ + + + + + + + + + {{ ansible_fqdn }} + {{ solr_http_port_1 }} + ${hostContext:solr} + ${zkClientTimeout:30000} + ${genericCoreNodeNames:true} + + + + ${socketTimeout:0} + ${connTimeout:0} + + + diff --git a/solr-tomcat-instance/templates/tomcat.local.j2 b/solr-tomcat-instance/templates/tomcat.local.j2 new file mode 100644 index 00000000..95ad1540 --- /dev/null +++ b/solr-tomcat-instance/templates/tomcat.local.j2 @@ -0,0 +1,2 @@ +SOLR_OPTS="{{ solr_opts }}" +JAVA_OPTS="$JAVA_OPTS $SOLR_OPTS" diff --git a/solr-tomcat-instance/templates/zoo.cfg.j2 b/solr-tomcat-instance/templates/zoo.cfg.j2 new file mode 100644 index 00000000..321156f7 
--- /dev/null +++ b/solr-tomcat-instance/templates/zoo.cfg.j2 @@ -0,0 +1,17 @@ +# The number of milliseconds of each tick +tickTime=2000 +# The number of ticks that the initial +# synchronization phase can take +initLimit=10 +# The number of ticks that can pass between +# sending a request and getting an acknowledgement +syncLimit=5 + +# the directory where the snapshot is stored. +dataDir={{ solr_zookeeper_data_dir }} +# NOTE: Solr defaults the dataDir to /zoo_data + +# the port at which the clients will connect +clientPort={{ solr_zoo_port }} +# NOTE: Solr sets this based on zkRun / zkHost params + diff --git a/ssh-keys/defaults/main.yml b/ssh-keys/defaults/main.yml new file mode 100644 index 00000000..95436d5f --- /dev/null +++ b/ssh-keys/defaults/main.yml @@ -0,0 +1,44 @@ +--- +manage_root_ssh_keys: True + +# +# Example: +# user_ssh_key: [ '{{ sandro_labruzzo }}','{{ michele_artini }}', '{{ claudio_atzori }}' ] +# +cm_pubkey: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDJN8XR/N4p6FfymWJy7mwR3vbUboC4P+7CgZalflhK5iH0P7c24/zZDY9Y5QIq58IViY7napqZuRkNHnHcvm9mxtSxQ16qe03NulABN5V/ljgR0sQAWz8pwv68LDpR9uBSCbXDdDCUUlS+zOxCHA6s7O7PSFavX4An1Vd/mjwoeR4eLRQXNcKsK2Pu/BZ3TCLmWyi2otnxFiJ8IoKW1CvjxKWmt5BvAvys0dfsdnTSVz9yiUMwN5Oj8cw/jhKqadnkvqTGfGl1ELm9L2V7hT6LM0cIom9oRsQf+JJ6loBe3UUZGaAhY2jmARmZdX3qV9Wh+UtxaWMEAXB9mf/2cK9f jenkins@cm +ci_pubkey: ssh-dss AAAAB3NzaC1kc3MAAACBAPwK/P1MAOksk1vT8YQd4/d9apwx2Npbs1ynNq3jZloDClbR9bOyNQ41SA5HcSHvgRYHTDySw2nCDWew+FB5VqoEqmTecpy7MoPYyxOuRByx26LwgBIt7f3Dj1hrepwiWtrvY16dw7SYEs6+Bm8VGXRmlvGPORzuyP8plagI2641AAAAFQCknhxNYiauoYAfjcx1LONccKwjZQAAAIAJ097QfL/ehWEiaEI710t8wksckio1fhS9zLckNDyBaqMYYBQUSru/orWy6hkoF1hpCiRuhyKj5HyzIZmHRk0oPg6F6Kiq/9AKZAxH/mKD5Dsw0FVANQMuOq5DH2O3NYxlBEh/8tEqSg3BoNsv563i48FJ1DJeOd8/Ldi4tBcxswAAAIBu/R99IT3aOYkoC9z5I7qg0nL5duth4gMsJRJZbwoTtdY4ABF94GBHeb8RlQ+o7dxiUyBp0P5ME0p9Mc0OsTZPsLzYsnZfpzPIWmlNGaPPQExKFhpXkAwJ0zuDAatf9Tc7eT7bhf/vDsZXS4GKJ4HtRIVb6z5jvjq9Y27/HNC/6Q== jenkins@ci +claudio_atzori: ssh-dss 
AAAAB3NzaC1kc3MAAACBAPMUiX2cCrDItmblQgA2sRZ5SixdDvwmVG0yPk67wb2oZF8MCGCGwwt9eWI8EecMKIevoWF63pn8poUveqvnRRFfGCjly8Rl6cNM3QZRmc5hjU3HcG/eFDCs92+vGdYfN/UV1qi2xIKU8204VfpnpWfsPlBqion/mR/kfLgCD0RTAAAAFQD6xPbDfMl0mkPGNL591eYHlKbtwQAAAIBJjhez8Gy8WGMJdcd/0B8rgEuHhDA9SQTknc/V88OMMthe3T5dWwwesT0DU4fPbn9Be6QWU+SNrBESmB64UpreCeodvh9pnfe0xerYWMplELlHM1yRtCCDQp2iDXK/oRTZne3IX8+OPx1OSKkWzQAVls4PV92CDSS8h1B9yvutiAAAAIAd8tasvTEmFpjaqszB6gkCdTlRHuVshRdrvAE8NBg9n0EzN3GdIyzJMmMAtTb0oJZZ3KGnKZic/gGGbqEY48PMbd9/WpWTf8SJz1ccpt3EQMbvLBJUwsJQ8ObBYhVe3SIwucwsZguIiPNdHIje+g1fc1DQHd5ALt3ljAYCPW+Yug== claudio@claudio-desktop +michele_artini: ssh-dss AAAAB3NzaC1kc3MAAAEBAKYA5eODSPDAcAhTqXQQP5mzPmLfS7J929Ncl5eqTj6KsjayfNnKsKDPzXGD/YGEGTP82VQuBzk42c1WLoUi5GnB5kZUWfdrKJLbr4JXcVUYnNNIwWIc1L9YunmbFN2zllHUXHrKn6EeKjR6H5xT0KPOX17MUa462jA2FLvesaqOoomm/AeNBF1UkCx0mRJEAMGq+I3xSBQJVhUOFmRJ7n6b4X0E3GXxtpkAwHiBiHX1GtNh2gMeTkIBHeZrS90l7DOumM8Y5KOP+fBd5scxodHKG2p+t3gwzwU2RoF5Hq8OvT4B3Dr5qPZKBIrB6kh6/5rLv8O0Lbky2aeoiYaIR0sAAAAVAO1EMaAsE93IDppRLlV+EjNn/4HbAAABAQCDPZHdR+uG0jfsed/ONPzecBDAJ4qS99D50hqrmQIRtsuhwo9KFsJ1cVjgSYjToqg8XuPZaO26E1riHnFAGoExQFNdev++kGtMfT3sxHOLwDd19fA3KNftFY0oqzDkLuD4D1+8gWk7WmTk8M5O5McFuuAr5TmXFdFNT49/6Z+XOuIQxyuEq9kJxSbO+dag4699lm3ZadSq6SEC2u0WAgyaIYUorYPJyYETSvUpsBtv37+oGbbz7dfbZ5pnmYi70BFiC2G7fA79shn0X/+Gk2Wp7RTDP/OB++RZFcrjHFQtvETdGSviq2Lxl1C7zp61qAmd0TZJBZ19k29nXIrILEnJAAABAFRqkJyVwZerL+E2jbF5LP89NW9HsjrBOEBekohR5zQY1KUPDirbReaGdf5hM6tvRxQCjlD+VMNq2VFRDC+RqOot5+KIyCaom4sXeYZiJBWa1Zx5YLbUZnYBGIpsa+IICA4drYwInGUN2EhClPwfDvZzFhd422kZFjiLYNM1HQ9f5TbKf1cPLSE/OitxU6+/NrCbMaRO2QPrjAB2EQG2s3DB9qfMBPg/Re7DyGJgMBn6KUXZ6JRvVssASvF7WsRaf5zRpug335CymndSS4fvQY74XJiVtB4vqDZle+WhXut8jvZ1Zl525fZZg9smZ2anqVWGGRxael7hjvlwYXbazkw= michele@pc-artini +andrea_dellamico: ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQDZ9n6B+J5S7NPnwjejPC2WrvcRzC07WPnAoQ7ZHZ0Mv9JakyWItswzI3Drz/zI0mCamyuye+9dWz9v/ZRwUfBobVyXuptRaZIwxlMC/KsTZofpp3RHOBTteZ4/VM0VhEeiOHu+GuzNE0fRB2gsusWeMMae2cq4TjVAOMcQmJX496L703Smc14gFrP8y/P9jbC5HquuVnPR29PsW4mHidPmjdKkO7QmDfFAj44pEUGeInYOJe708C03NCpsjHw8AVdAJ6Pf16EOdDH+z8D6CByVO3s8UT0HJ85BRoIy6254/hmYLzyd/eRnCXHS/dke+ivrlA3XxG4+DmqjuJR/Jpfx adellam@semovente +sandro_labruzzo: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC+PFOSF+U9pvWTH/9TYZer3oDvTU2q6wVPs0dvgYc9Ak1Wdzmq4Dj9nyeLBW3G1i5ddqFrr/QSjIroX2/y8Z8Dq+OZLRpBhSyLF9bV0jKbytJJYhkzIJHgE/ITTdbNQVZstjPZ0D4c/0lrbMwiiwsKWRqphmvMKFmgkO4M4w1qm8B3UYPHF3lZfw+vm+rgVv+FiOltgsRm+LU0IszeiiOd1WgPWUVYixFnNUVzDkXRDatO5//M1XMHM/PoontgnsCP2j9kxIptYgguiNZUIeMUFljw3SbV84NrVUSpL6/fzmvsEv05rkRT0+P8oPYIhxO1alKr99H9ADg7pU36rWaN sandro@sandro-pc +hadoop_test_cluster: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDi7O89HLqa3HMEkmCVF6/V/IWw8G8eaKWOOzDsLtQAFFti9rWHckyCSxNhtYuuiGLhn5Mad0E7JaguexU5j+Rm9Vu30ducF6DefJsOqQ5TfQhzN60w5f+y59BqWDSHBBawEhfuS2B5qj9iL76w8ZgMsqS+6WXiT792F9DoelYfKBODQi8/AE5C93iQiYyyFIrvy37KUfvBlzjSkNNHb5A36PlHmQBZD3WhROaZfjUfXifFzOSs9bERazttXG8HeElt7zbE40OSse2HG3y34gB+TvGIYbd3scQUiL5dEWt4cDSDBrEU6b1rG04uZgkscxCFwTDxPrHUVXS0ou03N4nr Hadoop test +tommaso_piccioli: ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAzcHuDU7PgJwz34AsVG0E2+ZRx17ZKW1uDEGABNk3Z60/c9LTwWKPj6kcIRy6RzFJI5X+IgPJnYouXVmJsIWjVL8IRk8fP1ffJC6Fyf6H7+fCxu/Wwed5OoOCvKeZ0bEmJ1tlXFM6+EnxKqLCvz3fsNy8e4WKMnpS1hT8K6YB7PMjt60S3wOaxds1Lv4NmmgnfGM5uZFYrZCx1/GJCzNSh7AEEEUIVQ1B8xmXbet7whNiwDmiOnXSlt38dkIYT8kNMuRCj/r9wPr7FmoUCOFzUVXTcnuYagKyURrZ8QDyHbK6XQLYXgvCz/lWoErGFbDqpmBHHyvKSeLPxYfJpWJ70w== tom@tom +backup_agent: ssh-dss 
AAAAB3NzaC1kc3MAAACBANBn5i7oJd12+GAeDVSAiPqCxcCDzWe41g3Vy/LhbYKwG0smPNJRfvyf7lKWkgolJfMJZrk7bBVhJoApkV7vkFkrSPueyRC+/ohjafpOsmxRYiOaSrDZ2c9TbGFVZTh23pUXoDPp2Z0N8l471b9Mx/nqgtflCV+IVICcDZbUhcCTAAAAFQC+fmfljTFllCMKsgrSJcQAtiIT/QAAAIEAvrsLfmQzHQjt4G5FhcPVbvP87KUsDh0xksCfMRP6bQBz/3mcnt7V5/MLll/CZMiOWjRK3ww9zCYHprUwQtAZSllFWiGUKw1tDvf1ZQGESYP/vvWwcpPZpVsRHlhRtuMsQchSRxw03yYOqEEa2akWzQlvaZ4CWWym931mZg6zY4AAAACAG/l8dU/QEMK1JP3rDV0kZYvcxjUC9Mxw5ScTyVqVnxDL75ssX9HiQamsiTk0dYNyl8qkB38FfkB4LhEb8FkHs4toN+nTNPPlLqhpYMs+anwyNy32LnXAVP02VJ2+3exwGe0b5vtIFpj+j8s7YZMHN5x6d4xhZ9oq5M2pJN6M48E= root@dlibbackup +monja_dariva: ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAuQJvgDc8lQB+EArajGPEirRuYxGcInfiM3uRS0P5Dhqch6cuNdMFFjCoQVFL2Dvs7QNSRm8mvnPLWOCYLEFPBdXlA63w+n3VWoVOs0lUgQM77/axetd/K8BCkJlcA/exvVxLtzc5k8hN1k3OJY/Npi2Xa4WyEMV6t7+vYK3MXPjFBy4Y/aLWZvHcCn0zUbeB8T8PJ2S8taCIOMzemUzjGs3c0f4y6oaJx1gPw31PCahkaVS4ZLSt+0y3DRaGiXjyzgbQPf1whBOT4SSiX3SgdMvxA/Fzz2sSAn9PNfKq+/vygn7qDB79qzBhOXs36dPuwmsqggxIZasGUT/YfRp5Cw== monja@pc-monja +andrea_manzi: ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAoCquwjgvRQXrHJ7sjY7/mFv0hEev4dljYKYz3Rf9r1rExQ6zku4tCvLkwmc+1U4ui2GCMQ70Hp1BbVdU01WVdAb6ESLAqk4m2NFiNxSsxerEyyOgnCvTA+Pcb1beVHgEm1/IA+6MgVPg71nE2OETpaoDNBGn+AmCdLqC67lXM9KlEaoLFFGY8ZbwJifWdidH/fk3rQojnGhxnFOidVu8QeV+b5kNTyVA2CUbCZCFZANIs/ZrDOmP5nmtA35vkIRU0OV6iBeJmcYsMwXmh8kiR6KoKVcH7gMMxTpBr/wjvdak7BeiZirP9poKE7XBiyHeatqQgEUOALsolkCYk8YJUw== andrea.manzi@shell.research-infrastructures.eu +antonis_lempesis: ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA8nr14q0s/8V9Nv3bz7xCk9FwKbtN21qx33PDTUS/NjwHX/AQIE1ZFbepPOnzLuPy8LUtzrEI+cEMDjn37CLiZWjnZkPOaIV7ELUBvwIk6JBe6iXSq93atYJWxQQsPuc1uoAFWLayxExMRl+P0UQCP7pQmTg4v8U4VflCp0LLBglgBl5glIiw2fLAfc+JawefWGnp92djuvqii8zm5nUmgJ+5DjbSD0rMO+vYXme5ig6v6b2YFG0cUHiNk8evM6M+OWmtz1uzP6kfQ4SjCNpzib6Rub8hgPlkJH/z4S+7lF1e6uwohQyicwu6hfTfIL+IRRCrNTGtzcDmk405/nIETQ== antonislebesis@ekton.lan +alessia_bardi: ssh-rsa 
AAAAB3NzaC1yc2EAAAABIwAAAQEAvfQoH4uRROhIUY5VTthAiY0Ga0cbg3smsT366C4Nd3TtU5ciBterRQv0YkvdQ4zS+e3D47PFRAuEyJEAJMp9+odhmjT6WPLhMYmE42b0qk+WC4uXG7V2rTX+wNvX4HaVHnlPai/6Of85rZ1AKbeMB2LLKMvj0n1HovVg6VbLUfrrxfkcTfgE27mukoRQy4RuZjQRjdJ1o7g4geA05CrFjDOriqwl4WDXWUNSkx2MwtOZ58ZLAVu84ce+RYvzxHC/wZptOx6U35fsoAaK7NPIiwbRbSbQqlAMnQauCLYTvfFKFkqY2JXp9q6lSsW4S5VnEeJjWvO/e7rxOmdbxGzx9w== lexis@lexis02 +andrea_mannocci: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCtTV2pjWXgTmX5h9J7VtQbYZ2NoQyZmLKl5gHvBKcX4pgBNYR+OA0620l3I3bTLPzqx93y6N/GIi2ewutyk7n2a5qFAIZxhrQYR5rSQn07apTDSh9CKyAyy6baM/jQmZN4ba6ObHIFdtIPHyY0Z/2ni6ohWXuOPIC+me+/x4R6P5s7y6x4IoMOGcEtn+puJ1gAdMBhkn7IqMAbdMj3WbsBjDAJ2lT8Dbyet8fkW4TENxd0teRW9jGeSP8rtuapnAF6rgcvPn/gk3/0wnBsXjtlBe5VEJTsNXY50RoB+PdkLgT4h6613v2WtR6ZoCEVNLXbsJ2BabrCmntyEEJVdbMJ andrea@pc-andrea +marek_horst: ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA0tbauAEn91q209ek50lv6jeBGsYy+N25XPVE9e173L3oW/NR1DuIXdn/zpHy5sLKpWk2nLkGJxNBdAFlKKxDKzRRZ7aX8qB490o5H4GTGgdxIQtp8x66CvIjMyM4kYLExVb4WVV7yMxCxuClMk6/m0vo3h77VzL08e3uyLoa5FZ3RPbOFb6QvnH4QEoFp/6Hos9mJF2bY/w2DqUrUVgUeAO9k9uilqhv+rwHdsq20g9OXHNlWOOtNtrWq0pn1FU1jCooZsbqLeBcEGlvD/I1FxqLi7x5llpNVfHTmEHoczTmuo0sqAGmSxHWnz3C4KtVTHVqxLS6hSUp55j6DQwPnw== +eri_katsari: ssh-dss AAAAB3NzaC1kc3MAAACBALsZPjtXRreOknW7KAoBCiJ/QqFfHjz2JD0hTl/MQOPTNfn8532F1tpQDqMKz0H1XIljJas7EvrVDMNDO7iX5CbTmfLh2ds8ssPQ/LwH8ArNfsWaWyELVWJExXA9Xizcb8PApWScUEeRgP22ZTgnHYSX7zCQGhSn1Kb4vQ3H8MxbAAAAFQC8PJWnks+PWgqtO7Gb8SOV8oP3YQAAAIEArfQ69en2ZHku2T4FONfhjFLy7AKpq6Rh40KCGTSgowtbyyYVk0aRupqMlVolYwlbeY+/o21EFb1+Wy9nFDNsu1uY/1mESdLs256rRy6VJx8/VvuYg4r7TdSqypOa0QsqzCbExwZR4witez5yMQKZji8kmRKWKRsByFVk2OR0IxkAAACARQlw/skGy5wfkWd41YqsoDhfMLVNLAS2dKnUkLdifUKjymcSHC2WrYq2LfxVxrd9CFFp/yurlQ01v/818GX7nE9zBiRhhFS944Lk05CmInmcDt/J2iGq65bA/7iem9EhXkU+5Up1uYFgdubPEL7Za+Pk+Z9NMdqqjtco9Q0A6v8= eri@eri-duffy +marko_mikulicic: ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQCYNjCquDDIpGqJgr8DTkRd0Y1ngmrq+FFMb+UnALdm3I1Uch07Z+TAkcrkpr9RdyTjP3mNIUUyI18Z6NgUC2TR4x7wVA9eV0uGWP8BiocXWPjVhQJhtDkXldkP93ylyYlLJ4VQ+xGinKdg7ZA4KTpG6rnjL999AA4W0utj5B8Dj0l/wvp96ONq1ZOTzOc3h0t9NGVQLbXstNakQkPcb5E2hyt4QOOahpZ6TG2is460G5yEgV3xHT/VRJQn0OjKeHnXlDwXs53qwjeNrESMEv4wD2qufgAXKbPGK7+3GReE8VkkhwnEY1/ET4LaTyqg6eIp0mIiScDvBV0/UCNX8c49 mkm +jochen_schirrwagen: ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAqVJeLtXaqseUP3cHSIQw+6Piv6s0PmezFbj34oqcN81/JlzmTtpOd8GBX6N8Weo40HbKhlghOl08+3WP2fW3eg9vaST6xCy8BvzLcqb8LPBSlTXa8imAK9AWkR4peFi1zYpIciZpkAwaFtfpdSR/zJip2s61EgWhinUPHs/0PzCCM32P4Yc0qYygb+htv4AthZWChEbHSY7eNrXIOOvyQtUSbpGJ78VCEdlKuy+ehhTxlMOBxcKca1PSWU3jSmzkSxnUotr2IXiRK1bUVZYpXXd7K89EZfPpb3DG1z8UBf9n0obLdI0yvaka8z8l1KxbwuAhN9MyzHITALbniYIHOw== jochen@jochen-laptop ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAo5A+f0wdqoXCGEFBpePV892cq9MswIgK9vmDJ22TdHKQrN5h1sIHeXjxO3vnaktb62evFqZw1kueA0dwQhEA+Kvpc5qN1s+GfIxs4PbNjiNWNVgwrfGK11vlW/LP2GgbfZ7pl+Gxj6Qu65/A2eMf4c9ZjAOnHck6RQSttrfIjR0kLpqEB3o2x8s89vu/P5PG7mN+IsfW9Ow/612m+8ZG84qnVAo36lK9mgEFUToozIHfON14uC8VGTnsN9ff9S98GJkW8Ga3ha9voPwkp794LBHZlQj01Pwm4ZOx+tdOfTNXx06szjswacWXsW4zaTyH9MZP9LumubGG7eOse0y0bw== jochen@jochen-desktop ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAo5A+f0wdqoXCGEFBpePV892cq9MswIgK9vmDJ22TdHKQrN5h1sIHeXjxO3vnaktb62evFqZw1kueA0dwQhEA+Kvpc5qN1s+GfIxs4PbNjiNWNVgwrfGK11vlW/LP2GgbfZ7pl+Gxj6Qu65/A2eMf4c9ZjAOnHck6RQSttrfIjR0kLpqEB3o2x8s89vu/P5PG7mN+IsfW9Ow/612m+8ZG84qnVAo36lK9mgEFUToozIHfON14uC8VGTnsN9ff9S98GJkW8Ga3ha9voPwkp794LBHZlQj01Pwm4ZOx+tdOfTNXx06szjswacWXsW4zaTyH9MZP9LumubGG7eOse0y0bw== jochen@jochen-desktop +old_nikon_gasparis: ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQCpwiKTTbiaRtuloEgvTRwjDjzrYSjUOUfjZ/o7FlfvtkApA09bSbbtVpMid60TYzf2tK1ie0Y0rCnaQ0wiaSQFqGkw47VsewBOpyJC+pWXz6GLMMJUEY6viDSuUDbn7ADJqak4YscVi2vZCSwWwslA+jBqWimDdE+8hIKNqQQA3klZ1zp84HayUdJY4jt3nbpQkOpVUdE/1cggVdq523hF2u+mjyR3ctILVyyPArxPInYILZxhaS8AvX8ZPADIE5Ki0zowC2UsvbZZzauJzJQ/KuK1tvZVD2AaEg+06Kj1RWWxIlYgXpO+XYGoYEViPMHUdf1h+zt+t6UxXshWPeWd nikonas@di.uoa.gr +nikon_gasparis: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC3b+t/2RQjw8d07zV30tD0qysEFNTeeAsFqazdrvPa+bbm6wZ75Gkka4+wWmVZdd56gIh4yx4L4avnnzeQfUTREgrhNmlHRPdVB5rpJNa/3bQ+J/O3SpyRcGawPKNJWlhwCWaILag0lm3O+4ukuzN2WXFxHGyiiz0FLPXS7Yps2k3OZVHPx7GhGkr+K26c3oELR/yTCCgQxrZwMpy9xOLhXgPZRlzj4Y/KQBgRojbhhrFmmKe3k7g8u2Kb/oSDl5+kSOWzV7qrvHkHDUc2K1bp+lrG6L8QNLivZzOVQ/VeBBGGRhSL5D2JdC4T7+q89dsmPQM6Zu3lWBKQk/Jw/1gZ nikonas@mpagasas-I2 +roberto_cirillo: ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAvkwppFE+K5MjKqtkGJN63wkcwaqZG4HkgPqMSWrXmCfDPJ3FxjDHV9aQRJYVKZObc9+SsFc9IYXwB2A8FI0XwPkCH2hfFKDVNO4TktO/SrM+4tXbEfEDWX/PduBQLootYaMEVj++p2+s/mxVnxTAMzsR4txC9tkWR4JO4VJ2cpZfM8po4p1wA4YteW6Oiv0PqUEsLtPtBHGuCgovo8WS+qxcxpeBBnewEssgis2dzDSqx5HUmaOETAxxEHflapHWQLum0JjvXsG5jlf9jL44XJPkcHXAYk3gnhtyM0moJpUya+GX7+ttfWWvwxs0tYNDXNMRn91r1hMLWmas4D+T/Q== rcirillo@rcirillo-cnr +fabio_sinibaldi: ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEArNhKFcJ6T08sn7kTTLf+rO9HEvgOvqfhv5HQ2sRf2tFYfjfCb0zHKnMkgW+sy5gMU10Lyx1r7juXCvqRC955uIM97m1B1Xc6sVqASVKuGPhCKfhxEaMAyBcWFdE+HYbCOPYVN+JMrcwWfbblwiZTtK1OCqaEUvDDI7cFeU68noXwggEp46T48eqMUdi541D9Y+BVx9HYAo6OCQz0+6eXwxJL+tpRcAAXIMMWv362CYHoOgIU45R7xVSMLY1k/HLrcEAblwxEaSpduCH5cWUXZE/56IyxpvP44BxZkVhNdqJLmg4hxBQWhoMNYiTZxbLay3W2TwBCM111cAtUx4M/jQ== fabio@pc-fabio +gianpaolo_coro: ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAkLUsStIPUVZVWiHyiI2poDnB70CjOJttbFLc5hBd6ViomiFil9u9q5Q0M1JBFFSv8Yfl1Rmc9zOh/52lJolxPGn8r22uGgDHVv71IJ04nS5KaRGIbv2WoZbYBc85oyZk5Fv/emY9Ace/t8icgDl5xJddeLfK6rTU64MZ7NGycIc= coro@coro-PC +katerina_iatropoulou: ssh-rsa 
AAAAB3NzaC1yc2EAAAABIwAAAIEA29WTITAKDhIE4lYt41hEtL3TnE+bIrlZAdAzSKySHOXPI8Q1vxanvprnL8BU0okgfZJDx3qxcTWLbwpcdWvGbO2SIA8JSKl2viQqfYDc5VtWFd4xo5z9y5BRrNDOOel+XAZjamx8lv8c44Au0ACV+jCAhnzwJA4Iso1KuNsuj2M= kiatrop@rudie +farah_karim: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCzKSQSk3ntKGUW2Cy8lt/44BTK2+UxMM4W2XO4CrcwgUxxlgIfpL4UjyuSKIygRdU/lL/4xHJdRNzA7PSEiHnBhIeLiF9QWw1mO2GVdJ4/1G5J/XEZ3sL7zyEdwwks7FsnT4U9PO9drNDZ1AmIK8eDKtX9EJcOFflulOknbIHjIq29gXcXbrhQaV3rNHS8vGDkv3fkpJT9Wi8BEUMeMFYsa3k3pc3nPysCQR+xsVJ1Ht+1gpU71W7fACaI1ltYaCToPAJasU19Tz6xE3edl9/Dz6HIL5FcVNSbLFEiyQhd5oL1ITCXJOwzyqobrUUdRK/30iIBRRFW00AIGQCDV0S3 hadoop@karim-ThinkPad-S1-Yoga +luca_frosini: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDlTQulSJFayTJyOOecgsct35u7uvVQGX/Da11UZVxvJzw2sQKOMSCMBBGF9zUlcMoP/qvF425jVMM71S8kamCcqgSN528fp9W/Nhw7s15NbCE3H9tJ3B+u5ESOYsRfgogeTIyL26aIY/2rke0DoKDIMU3YlOtN/1ipt5cY9uV3ootxTM126y2WChICGo0h77M/Ta1pIccUE0XbuaA1HwlJBkfDzQ2kh5tkaC7mjeETstOQzpEoPFoVr0qwSPz1Y6l8uiedpDZejrq64Z2zRcSxjEQ1wuA9r8uO7TJQttUKK8m/dHMe6q3WAiFc9sOYe4tf/GEmziB8VloMTNCPJQiz lucafrosini@pc-frosini + +# Use the list when you want to give access to non root users +ssh_users_list: + - name: '' + authorized_keys: + - '' +# And set the following variable to true: +non_root_users: False + +manage_root_ssh_keys: True + diff --git a/ssh-keys/tasks/main.yml b/ssh-keys/tasks/main.yml new file mode 100644 index 00000000..38f14fc4 --- /dev/null +++ b/ssh-keys/tasks/main.yml @@ -0,0 +1,8 @@ +--- +- include: root-ssh-keys.yml + when: + - user_ssh_key is defined + - manage_root_ssh_keys +- include: non-root-ssh-keys.yml + when: non_root_users + diff --git a/ssh-keys/tasks/non-root-ssh-keys.yml b/ssh-keys/tasks/non-root-ssh-keys.yml new file mode 100644 index 00000000..060d7613 --- /dev/null +++ b/ssh-keys/tasks/non-root-ssh-keys.yml @@ -0,0 +1,23 @@ +--- +- name: Install the ssh keys for non root users + authorized_key: user={{ item.0.name }} key="{{ item.1 }}" state=present + when: non_root_users + with_subelements: + - 
ssh_users_list + - authorized_keys + tags: + - pubkeys + - ssh_keys + +- name: Remove obsolete ssh keys if there are any + authorized_key: user={{ item.0.name }} key="{{ item.1 }}" state=absent + when: + - obsolete_ssh_key is defined + - non_root_users + with_subelements: + - ssh_users_list + - authorized_keys + tags: + - pubkeys + - ssh_keys + diff --git a/ssh-keys/tasks/root-ssh-keys.yml b/ssh-keys/tasks/root-ssh-keys.yml new file mode 100644 index 00000000..56192174 --- /dev/null +++ b/ssh-keys/tasks/root-ssh-keys.yml @@ -0,0 +1,16 @@ +--- +- name: Install the ssh keys for the authorized users + authorized_key: user=root key="{{ item }}" state=present + with_items: user_ssh_key + tags: + - pubkeys + - ssh_keys + +- name: Remove obsolete ssh keys if there are any + authorized_key: user=root key="{{ item }}" state=absent + with_items: obsolete_ssh_key + when: obsolete_ssh_key is defined + tags: + - pubkeys + - ssh_keys + diff --git a/timezone/defaults/main.yml b/timezone/defaults/main.yml new file mode 100644 index 00000000..c2387a6f --- /dev/null +++ b/timezone/defaults/main.yml @@ -0,0 +1,3 @@ +--- +timezone: "Europe/Rome" + diff --git a/timezone/tasks/main.yml b/timezone/tasks/main.yml new file mode 100644 index 00000000..3e279d20 --- /dev/null +++ b/timezone/tasks/main.yml @@ -0,0 +1,13 @@ +--- +- name: Write the timezone file + template: src=etc-timezone.j2 dest=/etc/timezone owner=root group=root mode=0644 + register: set_timezone + tags: + - timezone + +- name: Reconfigure the system tzdata + command: dpkg-reconfigure --frontend noninteractive tzdata + when: (set_timezone|changed) + tags: + - timezone + diff --git a/timezone/templates/etc-timezone.j2 b/timezone/templates/etc-timezone.j2 new file mode 100644 index 00000000..0b6d0093 --- /dev/null +++ b/timezone/templates/etc-timezone.j2 @@ -0,0 +1 @@ +{{ timezone }} diff --git a/tomcat-apache-requirements/files/xercesImpl.jar b/tomcat-apache-requirements/files/xercesImpl.jar new file mode 100644 index 
00000000..dc63fb98 Binary files /dev/null and b/tomcat-apache-requirements/files/xercesImpl.jar differ diff --git a/tomcat-apache-requirements/meta/main.yml b/tomcat-apache-requirements/meta/main.yml new file mode 100644 index 00000000..fcc5fdb0 --- /dev/null +++ b/tomcat-apache-requirements/meta/main.yml @@ -0,0 +1,8 @@ +--- +dependencies: + - role: '../../library/oracle-jdk' + - role: '../../library/apache' + - role: '../../library/tomcat' + when: tomcat_m_instances is not defined +# - role: '../../library/tomcat-multiple-instances' +# when: tomcat_m_instances diff --git a/tomcat-apache-requirements/tasks/java-requirements.yml b/tomcat-apache-requirements/tasks/java-requirements.yml new file mode 100644 index 00000000..c065d71a --- /dev/null +++ b/tomcat-apache-requirements/tasks/java-requirements.yml @@ -0,0 +1,23 @@ +--- +- name: Install the apache proxy modules needed for tomcat + file: src=/etc/apache2/mods-available/{{ item }} dest=/etc/apache2/mods-enabled/{{ item }} state=link + with_items: + - proxy.load + - proxy_http.load + - proxy_ajp.load + notify: apache2 reload + tags: + - apache + - dnet + +- name: Ensure that the jre/lib/endorsed exists + file: dest={{ jdk_java_home }}/jre/lib/endorsed state=directory owner=root group=root mode=0755 + tags: + - apache + - dnet + +- name: Install the xercesImpl.jar needed by the dnet applications + copy: src=xercesImpl.jar dest={{ jdk_java_home }}/jre/lib/endorsed/xercesImpl.jar owner=root group=root mode=0644 + tags: + - apache + - dnet diff --git a/tomcat-apache-requirements/tasks/main.yml b/tomcat-apache-requirements/tasks/main.yml new file mode 100644 index 00000000..16ca11ed --- /dev/null +++ b/tomcat-apache-requirements/tasks/main.yml @@ -0,0 +1,3 @@ +--- +- include: packages.yml +- include: java-requirements.yml diff --git a/tomcat-apache-requirements/tasks/packages.yml b/tomcat-apache-requirements/tasks/packages.yml new file mode 100644 index 00000000..a456004e --- /dev/null +++ 
b/tomcat-apache-requirements/tasks/packages.yml @@ -0,0 +1,18 @@ +--- +- name: Install the postgresql command line client + apt: pkg={{ item }} state=installed + with_items: + - postgresql-client + when: tomcat_install_jdbc is defined and tomcat_install_jdbc + tags: + - postgres + - postgresql + +- name: Install the mongodb client + apt: pkg={{ item }} state=installed + with_items: + - mongodb-clients + when: mongodb is not defined + tags: + - mongodb + diff --git a/tomcat-multiple-instances/defaults/main.yml b/tomcat-multiple-instances/defaults/main.yml new file mode 100644 index 00000000..eadeb720 --- /dev/null +++ b/tomcat-multiple-instances/defaults/main.yml @@ -0,0 +1,47 @@ +--- +#tomcat_version: 7 +#tomcat_catalina_home_dir: '/usr/share/tomcat{{ tomcat_version }}' +# Disable the main tomcat instance +tomcat_service_enabled: False + +tomcat_m_instances_base_path: '/var/lib/tomcat_instances' +tomcat_m_instances_logdir_base: '/var/log/tomcat_instances' +tomcat_m_cache_base: '/var/cache/tomcat-instances' +tomcat_m_default_user: tomcat7 +tomcat_m_use_default_user: True +tomcat_m_user_home: False +tomcat_m_default_user_shell: /bin/false +# Workaround for the '50 days shutdown' bug, until a fixed package will be available +tomcat_m_shutdown_port: -1 +tomcat_m_shutdown_pwd: "{{ lookup('password', '/tmp/passwordfile chars=ascii_letters,digits,hexdigits,punctuation') }}" +tomcat_m_max_threads: 200 +tomcat_m_min_heap_size: 2048m +tomcat_m_heap_size: '{{ tomcat_m_min_heap_size }}' +tomcat_m_permgen_size: 512m +tomcat_m_restart_timeout: 300 +# -server -Djava.awt.headless=true are always used. No need to specify them +tomcat_m_java_opts: "-Xms{{ tomcat_m_min_heap_size }} -Xmx{{ tomcat_m_heap_size }} -XX:MaxPermSize={{ tomcat_m_permgen_size }}" +tomcat_m_java_gc_opts: "-XX:+UseConcMarkSweepGC" +# Use "-XX:+UseConcMarkSweepGC" to enable the CMS garbage collector (improved +# response time). 
If you use that option and you run Tomcat on a machine with +# exactly one CPU chip that contains one or two cores, you should also add +# the "-XX:+CMSIncrementalMode" option. +#tomcat_m_other_java_opts: "-Djsse.enableSNIExtension=false" +tomcat_m_other_java_opts: "" +tomcat_m_autodeploy: False + +# JMX and debugging +tomcat_m_enable_remote_debugging: False +tomcat_m_remote_debugging_port: 8100 +tomcat_m_jmx_enabled: False +tomcat_m_jmx_auth_enabled: False +#tomcat_m_jmx_auth_dir: '{{ tomcat_m_instances_base_path }}' +# tomcat_m_jmx_monitorpass: define_in_a_vault_file +# tomcat_m_jmx_controlpass: define_in_a_vault_file + + +# This is only an example. Insert a line for each tomcat instance. 'app_contexts' can be used to automatically configure apache or nginx virtualhost http/ajp proxy +# +#tomcat_m_instances: +# - { http_enabled: True, http_port: '8180', http_address: '0.0.0.0', ajp_enabled: False, ajp_port: '8109', ajp_address: '127.0.0.1', restart_timeout: '{{ tomcat_m_restart_timeout }}', shutdown_port: '8105', java_home: '{{ jdk_java_home }}', user: '{{ tomcat_m_default_user }}', user_home: '{{ tomcat_m_instances_base_path }}', user_shell: '{{ tomcat_m_default_user_shell }}', instance_path: '{{ tomcat_m_instances_base_path }}/8180', max_threads: '{{ tomcat_m_max_threads }}', autodeploy: '{{ tomcat_m_autodeploy }}', default_conf: True, java_opts: '{{ tomcat_m_java_opts }}', java_gc_opts: '{{ tomcat_m_java_gc_opts }}', other_java_opts: '{{ tomcat_m_other_java_opts }}', jmx_enabled: '{{ tomcat_m_jmx_enabled }}', jmx_auth_enabled: '{{ tomcat_m_jmx_auth_enabled }}', jmx_auth_dir: '{{ tomcat_m_instances_base_path }}/8180/conf', jmx_port: '8182', jmx_monitorpass: '{{ set_in_a_vault_file }}', jmx_controlpass: '{{ set_in_a_vault_file }}', remote_debugging: '{{ tomcat_m_enable_remote_debugging }}', remote_debugging_port: '8100', access_log_enabled: True, log_rotation_freq: daily, log_retain: 30, allowed_hosts: [ 'xxx.xxx.xxx.xxx/32', 'yyy.yyy.yyy.yyy/32' ], 
app_contexts: [ 'app1', 'app2' ] } + diff --git a/tomcat-multiple-instances/files/catalina.properties b/tomcat-multiple-instances/files/catalina.properties new file mode 100644 index 00000000..8dae2554 --- /dev/null +++ b/tomcat-multiple-instances/files/catalina.properties @@ -0,0 +1,119 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# List of comma-separated packages that start with or equal this string +# will cause a security exception to be thrown when +# passed to checkPackageAccess unless the +# corresponding RuntimePermission ("accessClassInPackage."+package) has +# been granted. +package.access=sun.,org.apache.catalina.,org.apache.coyote.,org.apache.tomcat.,org.apache.jasper. +# +# List of comma-separated packages that start with or equal this string +# will cause a security exception to be thrown when +# passed to checkPackageDefinition unless the +# corresponding RuntimePermission ("defineClassInPackage."+package) has +# been granted. +# +# by default, no packages are restricted for definition, and none of +# the class loaders supplied with the JDK call checkPackageDefinition. +# +package.definition=sun.,java.,org.apache.catalina.,org.apache.coyote.,org.apache.tomcat.,org.apache.jasper. 
+ +# +# +# List of comma-separated paths defining the contents of the "common" +# classloader. Prefixes should be used to define what is the repository type. +# Path may be relative to the CATALINA_HOME or CATALINA_BASE path or absolute. +# If left as blank,the JVM system loader will be used as Catalina's "common" +# loader. +# Examples: +# "foo": Add this folder as a class repository +# "foo/*.jar": Add all the JARs of the specified folder as class +# repositories +# "foo/bar.jar": Add bar.jar as a class repository +common.loader=${catalina.base}/lib,${catalina.base}/lib/*.jar,${catalina.home}/lib,${catalina.home}/lib/*.jar,${catalina.base}/common/classes,${catalina.base}/common/*.jar + +# +# List of comma-separated paths defining the contents of the "server" +# classloader. Prefixes should be used to define what is the repository type. +# Path may be relative to the CATALINA_HOME or CATALINA_BASE path or absolute. +# If left as blank, the "common" loader will be used as Catalina's "server" +# loader. +# Examples: +# "foo": Add this folder as a class repository +# "foo/*.jar": Add all the JARs of the specified folder as class +# repositories +# "foo/bar.jar": Add bar.jar as a class repository +server.loader=${catalina.base}/server/classes,${catalina.base}/server/*.jar + +# +# List of comma-separated paths defining the contents of the "shared" +# classloader. Prefixes should be used to define what is the repository type. +# Path may be relative to the CATALINA_BASE path or absolute. If left as blank, +# the "common" loader will be used as Catalina's "shared" loader. +# Examples: +# "foo": Add this folder as a class repository +# "foo/*.jar": Add all the JARs of the specified folder as class +# repositories +# "foo/bar.jar": Add bar.jar as a class repository +# Please note that for single jars, e.g. bar.jar, you need the URL form +# starting with file:. 
+shared.loader=${catalina.base}/shared/classes,${catalina.base}/shared/*.jar + +# List of JAR files that should not be scanned for configuration information +# such as web fragments, TLD files etc. It must be a comma separated list of +# JAR file names. +# The JARs listed below include: +# - Tomcat Bootstrap JARs +# - Tomcat API JARs +# - Catalina JARs +# - Jasper JARs +# - Tomcat JARs +# - Common non-Tomcat JARs +# - Sun JDK JARs +# - Apple JDK JARs +tomcat.util.scan.DefaultJarScanner.jarsToSkip=\ +bootstrap.jar,commons-daemon.jar,tomcat-juli.jar,\ +annotations-api.jar,el-api.jar,jsp-api.jar,servlet-api.jar,\ +catalina.jar,catalina-ant.jar,catalina-ha.jar,catalina-tribes.jar,\ +jasper.jar,jasper-el.jar,ecj-*.jar,\ +tomcat-api.jar,tomcat-util.jar,tomcat-coyote.jar,tomcat-dbcp.jar,\ +tomcat-i18n-en.jar,tomcat-i18n-es.jar,tomcat-i18n-fr.jar,tomcat-i18n-ja.jar,\ +tomcat-juli-adapters.jar,catalina-jmx-remote.jar,catalina-ws.jar,\ +tomcat-jdbc.jar,\ +commons-beanutils*.jar,commons-codec*.jar,commons-collections*.jar,\ +commons-dbcp*.jar,commons-digester*.jar,commons-fileupload*.jar,\ +commons-httpclient*.jar,commons-io*.jar,commons-lang*.jar,commons-logging*.jar,\ +commons-math*.jar,commons-pool*.jar,\ +jstl.jar,\ +geronimo-spec-jaxrpc*.jar,wsdl4j*.jar,\ +ant.jar,ant-junit*.jar,aspectj*.jar,jmx.jar,h2*.jar,hibernate*.jar,httpclient*.jar,\ +jmx-tools.jar,jta*.jar,log4j*.jar,mail*.jar,slf4j*.jar,\ +xercesImpl.jar,xmlParserAPIs.jar,xml-apis.jar,\ +dnsns.jar,ldapsec.jar,localedata.jar,sunjce_provider.jar,sunmscapi.jar,\ +sunpkcs11.jar,jhall.jar,tools.jar,\ +sunec.jar,zipfs.jar,\ +apple_provider.jar,AppleScriptEngine.jar,CoreAudio.jar,dns_sd.jar,\ +j3daudio.jar,j3dcore.jar,j3dutils.jar,jai_core.jar,jai_codec.jar,\ +mlibwrapper_jai.jar,MRJToolkit.jar,vecmath.jar,\ +junit.jar,junit-*.jar,ant-launcher.jar + +# +# String cache configuration. 
+tomcat.util.buf.StringCache.byte.enabled=true +#tomcat.util.buf.StringCache.char.enabled=true +#tomcat.util.buf.StringCache.trainThreshold=500000 +#tomcat.util.buf.StringCache.cacheSize=5000 diff --git a/tomcat-multiple-instances/files/context.xml b/tomcat-multiple-instances/files/context.xml new file mode 100644 index 00000000..745bf953 --- /dev/null +++ b/tomcat-multiple-instances/files/context.xml @@ -0,0 +1,35 @@ + + + + + + + WEB-INF/web.xml + + + + + + + + \ No newline at end of file diff --git a/tomcat-multiple-instances/files/jmxremote.access b/tomcat-multiple-instances/files/jmxremote.access new file mode 100644 index 00000000..c5aab07e --- /dev/null +++ b/tomcat-multiple-instances/files/jmxremote.access @@ -0,0 +1,2 @@ +monitorRole readonly +controlRole readwrite diff --git a/tomcat-multiple-instances/files/logging.properties b/tomcat-multiple-instances/files/logging.properties new file mode 100644 index 00000000..6eeb1814 --- /dev/null +++ b/tomcat-multiple-instances/files/logging.properties @@ -0,0 +1,49 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +handlers = 1catalina.org.apache.juli.FileHandler, 2localhost.org.apache.juli.FileHandler, java.util.logging.ConsoleHandler + +.handlers = 1catalina.org.apache.juli.FileHandler, java.util.logging.ConsoleHandler + +############################################################ +# Handler specific properties. +# Describes specific configuration info for Handlers. +############################################################ + +1catalina.org.apache.juli.FileHandler.level = FINE +1catalina.org.apache.juli.FileHandler.directory = ${catalina.base}/logs +1catalina.org.apache.juli.FileHandler.prefix = catalina. + +2localhost.org.apache.juli.FileHandler.level = FINE +2localhost.org.apache.juli.FileHandler.directory = ${catalina.base}/logs +2localhost.org.apache.juli.FileHandler.prefix = localhost. + +java.util.logging.ConsoleHandler.level = FINE +java.util.logging.ConsoleHandler.formatter = java.util.logging.SimpleFormatter + +############################################################ +# Facility specific properties. +# Provides extra control for each logger. +############################################################ + +org.apache.catalina.core.ContainerBase.[Catalina].[localhost].level = INFO +org.apache.catalina.core.ContainerBase.[Catalina].[localhost].handlers = 2localhost.org.apache.juli.FileHandler + +# For example, set the com.xyz.foo logger to only log SEVERE +# messages: +#org.apache.catalina.startup.ContextConfig.level = FINE +#org.apache.catalina.startup.HostConfig.level = FINE +#org.apache.catalina.session.ManagerBase.level = FINE +#org.apache.catalina.core.AprLifecycleListener.level=FINE diff --git a/tomcat-multiple-instances/files/policy.d/01system.policy b/tomcat-multiple-instances/files/policy.d/01system.policy new file mode 100644 index 00000000..8e02c821 --- /dev/null +++ b/tomcat-multiple-instances/files/policy.d/01system.policy @@ -0,0 +1,52 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// ============================================================================ +// catalina.corepolicy - Security Policy Permissions for Tomcat 6 +// +// This file contains a default set of security policies to be enforced (by the +// JVM) when Catalina is executed with the "-security" option. In addition +// to the permissions granted here, the following additional permissions are +// granted to the codebase specific to each web application: +// +// * Read access to the document root directory +// +// $Id: catalina.policy 609294 2008-01-06 11:43:46Z markt $ +// ============================================================================ + + +// ========== SYSTEM CODE PERMISSIONS ========================================= + + +// These permissions apply to javac +grant codeBase "file:${java.home}/lib/-" { + permission java.security.AllPermission; +}; + +// These permissions apply to all shared system extensions +grant codeBase "file:${java.home}/jre/lib/ext/-" { + permission java.security.AllPermission; +}; + +// These permissions apply to javac when ${java.home] points at $JAVA_HOME/jre +grant codeBase "file:${java.home}/../lib/-" { + permission java.security.AllPermission; +}; + +// These permissions apply to all shared system extensions when +// ${java.home} points at 
$JAVA_HOME/jre +grant codeBase "file:${java.home}/lib/ext/-" { + permission java.security.AllPermission; +}; diff --git a/tomcat-multiple-instances/files/policy.d/02debian.policy b/tomcat-multiple-instances/files/policy.d/02debian.policy new file mode 100644 index 00000000..582c47c1 --- /dev/null +++ b/tomcat-multiple-instances/files/policy.d/02debian.policy @@ -0,0 +1,10 @@ +// These permissions apply to all JARs from Debian packages +grant codeBase "file:/usr/share/java/-" { + permission java.security.AllPermission; +}; +grant codeBase "file:/usr/share/maven-repo/-" { + permission java.security.AllPermission; +}; +grant codeBase "file:/usr/share/ant/lib/-" { + permission java.security.AllPermission; +}; diff --git a/tomcat-multiple-instances/files/policy.d/03catalina.policy b/tomcat-multiple-instances/files/policy.d/03catalina.policy new file mode 100644 index 00000000..2de15182 --- /dev/null +++ b/tomcat-multiple-instances/files/policy.d/03catalina.policy @@ -0,0 +1,32 @@ +// ========== CATALINA CODE PERMISSIONS ======================================= + + +// These permissions apply to the logging API +grant codeBase "file:${catalina.home}/bin/tomcat-juli.jar" { + permission java.util.PropertyPermission "java.util.logging.config.class", "read"; + permission java.util.PropertyPermission "java.util.logging.config.file", "read"; + permission java.lang.RuntimePermission "shutdownHooks"; + permission java.io.FilePermission "${catalina.base}${file.separator}conf${file.separator}logging.properties", "read"; + permission java.util.PropertyPermission "catalina.base", "read"; + permission java.util.logging.LoggingPermission "control"; + permission java.io.FilePermission "${catalina.base}${file.separator}logs", "read, write"; + permission java.io.FilePermission "${catalina.base}${file.separator}logs${file.separator}*", "read, write"; + permission java.lang.RuntimePermission "getClassLoader"; + permission java.lang.RuntimePermission "setContextClassLoader"; + // To enable 
per context logging configuration, permit read access to the appropriate file. + // Be sure that the logging configuration is secure before enabling such access + // eg for the examples web application: + // permission java.io.FilePermission "${catalina.base}${file.separator}webapps${file.separator}examples${file.separator}WEB-INF${file.separator}classes${file.separator}logging.properties", "read"; +}; + +// These permissions apply to the server startup code +grant codeBase "file:${catalina.home}/bin/bootstrap.jar" { + permission java.security.AllPermission; +}; + +// These permissions apply to the servlet API classes +// and those that are shared across all class loaders +// located in the "lib" directory +grant codeBase "file:${catalina.home}/lib/-" { + permission java.security.AllPermission; +}; diff --git a/tomcat-multiple-instances/files/policy.d/04webapps.policy b/tomcat-multiple-instances/files/policy.d/04webapps.policy new file mode 100644 index 00000000..74af20de --- /dev/null +++ b/tomcat-multiple-instances/files/policy.d/04webapps.policy @@ -0,0 +1,59 @@ +// ========== WEB APPLICATION PERMISSIONS ===================================== + + +// These permissions are granted by default to all web applications +// In addition, a web application will be given a read FilePermission +// and JndiPermission for all files and directories in its document root. 
+grant { + // Required for JNDI lookup of named JDBC DataSource's and + // javamail named MimePart DataSource used to send mail + permission java.util.PropertyPermission "java.home", "read"; + permission java.util.PropertyPermission "java.naming.*", "read"; + permission java.util.PropertyPermission "javax.sql.*", "read"; + + // OS Specific properties to allow read access + permission java.util.PropertyPermission "os.name", "read"; + permission java.util.PropertyPermission "os.version", "read"; + permission java.util.PropertyPermission "os.arch", "read"; + permission java.util.PropertyPermission "file.separator", "read"; + permission java.util.PropertyPermission "path.separator", "read"; + permission java.util.PropertyPermission "line.separator", "read"; + + // JVM properties to allow read access + permission java.util.PropertyPermission "java.version", "read"; + permission java.util.PropertyPermission "java.vendor", "read"; + permission java.util.PropertyPermission "java.vendor.url", "read"; + permission java.util.PropertyPermission "java.class.version", "read"; + permission java.util.PropertyPermission "java.specification.version", "read"; + permission java.util.PropertyPermission "java.specification.vendor", "read"; + permission java.util.PropertyPermission "java.specification.name", "read"; + + permission java.util.PropertyPermission "java.vm.specification.version", "read"; + permission java.util.PropertyPermission "java.vm.specification.vendor", "read"; + permission java.util.PropertyPermission "java.vm.specification.name", "read"; + permission java.util.PropertyPermission "java.vm.version", "read"; + permission java.util.PropertyPermission "java.vm.vendor", "read"; + permission java.util.PropertyPermission "java.vm.name", "read"; + + // Required for OpenJMX + permission java.lang.RuntimePermission "getAttribute"; + + // Allow read of JAXP compliant XML parser debug + permission java.util.PropertyPermission "jaxp.debug", "read"; + + // Precompiled JSPs need 
access to this package. + permission java.lang.RuntimePermission "accessClassInPackage.org.apache.jasper.runtime"; + permission java.lang.RuntimePermission "accessClassInPackage.org.apache.jasper.runtime.*"; + + // Example JSPs need those to work properly + permission java.lang.RuntimePermission "accessClassInPackage.org.apache.jasper.el"; + permission java.lang.RuntimePermission "accessDeclaredMembers"; + + // Precompiled JSPs need access to this system property. + permission java.util.PropertyPermission "org.apache.jasper.runtime.BodyContentImpl.LIMIT_BUFFER", "read"; + + // java.io.tmpdir should be usable as a temporary file directory + permission java.util.PropertyPermission "java.io.tmpdir", "read"; + permission java.io.FilePermission "${java.io.tmpdir}/-", "read,write,delete"; + +}; diff --git a/tomcat-multiple-instances/files/policy.d/50local.policy b/tomcat-multiple-instances/files/policy.d/50local.policy new file mode 100644 index 00000000..3f15a8d2 --- /dev/null +++ b/tomcat-multiple-instances/files/policy.d/50local.policy @@ -0,0 +1,32 @@ +// You can assign additional permissions to particular web applications by +// adding additional "grant" entries here, based on the code base for that +// application, /WEB-INF/classes/, or /WEB-INF/lib/ jar files. +// +// Different permissions can be granted to JSP pages, classes loaded from +// the /WEB-INF/classes/ directory, all jar files in the /WEB-INF/lib/ +// directory, or even to individual jar files in the /WEB-INF/lib/ directory. +// +// For instance, assume that the standard "examples" application +// included a JDBC driver that needed to establish a network connection to the +// corresponding database and used the scrape taglib to get the weather from +// the NOAA web server. You might create a "grant" entries like this: +// +// The permissions granted to the context root directory apply to JSP pages. 
+// grant codeBase "file:${catalina.base}/webapps/examples/-" { +// permission java.net.SocketPermission "dbhost.mycompany.com:5432", "connect"; +// permission java.net.SocketPermission "*.noaa.gov:80", "connect"; +// }; +// +// The permissions granted to the context WEB-INF/classes directory +// grant codeBase "file:${catalina.base}/webapps/examples/WEB-INF/classes/-" { +// }; +// +// The permission granted to your JDBC driver +// grant codeBase "jar:file:${catalina.base}/webapps/examples/WEB-INF/lib/driver.jar!/-" { +// permission java.net.SocketPermission "dbhost.mycompany.com:5432", "connect"; +// }; +// The permission granted to the scrape taglib +// grant codeBase "jar:file:${catalina.base}/webapps/examples/WEB-INF/lib/scrape.jar!/-" { +// permission java.net.SocketPermission "*.noaa.gov:80", "connect"; +// }; + diff --git a/tomcat-multiple-instances/files/tomcat-users.xml b/tomcat-multiple-instances/files/tomcat-users.xml new file mode 100644 index 00000000..7f022ffa --- /dev/null +++ b/tomcat-multiple-instances/files/tomcat-users.xml @@ -0,0 +1,36 @@ + + + + + + + diff --git a/tomcat-multiple-instances/files/web.xml b/tomcat-multiple-instances/files/web.xml new file mode 100644 index 00000000..cc8383cb --- /dev/null +++ b/tomcat-multiple-instances/files/web.xml @@ -0,0 +1,4283 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + default + org.apache.catalina.servlets.DefaultServlet + + debug + 0 + + + listings + false + + 1 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + jsp + org.apache.jasper.servlet.JspServlet + + fork + false + + + xpoweredBy + false + + 3 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + default + / + + + + + jsp + *.jsp + *.jspx + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 30 + + + + + + + + + + + + 123 + application/vnd.lotus-1-2-3 + + + 3dml + text/vnd.in3d.3dml + + + 3g2 + video/3gpp2 + + + 3gp + video/3gpp + + + 7z + application/x-7z-compressed + + + aab + application/x-authorware-bin + + + aac + audio/x-aac + + + aam + application/x-authorware-map + + + aas + application/x-authorware-seg + + + abs + audio/x-mpeg + + + abw + application/x-abiword + + + ac + application/pkix-attr-cert + + + acc + application/vnd.americandynamics.acc + + + ace + application/x-ace-compressed + + + acu + application/vnd.acucobol + + + acutc + application/vnd.acucorp + + + adp + audio/adpcm + + + aep + application/vnd.audiograph + + + afm + application/x-font-type1 + + + afp + application/vnd.ibm.modcap + + + ahead + application/vnd.ahead.space + + + ai + application/postscript + + + aif + audio/x-aiff + + + aifc + audio/x-aiff + + + aiff + audio/x-aiff + + + aim + application/x-aim + + + air + application/vnd.adobe.air-application-installer-package+zip + + + ait + application/vnd.dvb.ait + + + ami + application/vnd.amiga.ami + + + anx + application/annodex + + + apk + application/vnd.android.package-archive + + + application + application/x-ms-application + + + apr + application/vnd.lotus-approach + + + art + image/x-jg + + + asc + application/pgp-signature + + + asf + video/x-ms-asf + + + asm + text/x-asm + + + aso + application/vnd.accpac.simply.aso + + + asx + video/x-ms-asf + + + atc + application/vnd.acucorp + + + atom + application/atom+xml + + + atomcat + application/atomcat+xml + + + atomsvc + application/atomsvc+xml + + + atx + application/vnd.antix.game-component + + + au + audio/basic + + + avi + video/x-msvideo + + + avx + video/x-rad-screenplay + + + aw + application/applixware + + + 
axa + audio/annodex + + + axv + video/annodex + + + azf + application/vnd.airzip.filesecure.azf + + + azs + application/vnd.airzip.filesecure.azs + + + azw + application/vnd.amazon.ebook + + + bat + application/x-msdownload + + + bcpio + application/x-bcpio + + + bdf + application/x-font-bdf + + + bdm + application/vnd.syncml.dm+wbxml + + + bed + application/vnd.realvnc.bed + + + bh2 + application/vnd.fujitsu.oasysprs + + + bin + application/octet-stream + + + bmi + application/vnd.bmi + + + bmp + image/bmp + + + body + text/html + + + book + application/vnd.framemaker + + + box + application/vnd.previewsystems.box + + + boz + application/x-bzip2 + + + bpk + application/octet-stream + + + btif + image/prs.btif + + + bz + application/x-bzip + + + bz2 + application/x-bzip2 + + + c + text/x-c + + + c11amc + application/vnd.cluetrust.cartomobile-config + + + c11amz + application/vnd.cluetrust.cartomobile-config-pkg + + + c4d + application/vnd.clonk.c4group + + + c4f + application/vnd.clonk.c4group + + + c4g + application/vnd.clonk.c4group + + + c4p + application/vnd.clonk.c4group + + + c4u + application/vnd.clonk.c4group + + + cab + application/vnd.ms-cab-compressed + + + cap + application/vnd.tcpdump.pcap + + + car + application/vnd.curl.car + + + cat + application/vnd.ms-pki.seccat + + + cc + text/x-c + + + cct + application/x-director + + + ccxml + application/ccxml+xml + + + cdbcmsg + application/vnd.contact.cmsg + + + cdf + application/x-cdf + + + cdkey + application/vnd.mediastation.cdkey + + + cdmia + application/cdmi-capability + + + cdmic + application/cdmi-container + + + cdmid + application/cdmi-domain + + + cdmio + application/cdmi-object + + + cdmiq + application/cdmi-queue + + + cdx + chemical/x-cdx + + + cdxml + application/vnd.chemdraw+xml + + + cdy + application/vnd.cinderella + + + cer + application/pkix-cert + + + cgm + image/cgm + + + chat + application/x-chat + + + chm + application/vnd.ms-htmlhelp + + + chrt + application/vnd.kde.kchart + + + cif 
+ chemical/x-cif + + + cii + application/vnd.anser-web-certificate-issue-initiation + + + cil + application/vnd.ms-artgalry + + + cla + application/vnd.claymore + + + class + application/java + + + clkk + application/vnd.crick.clicker.keyboard + + + clkp + application/vnd.crick.clicker.palette + + + clkt + application/vnd.crick.clicker.template + + + clkw + application/vnd.crick.clicker.wordbank + + + clkx + application/vnd.crick.clicker + + + clp + application/x-msclip + + + cmc + application/vnd.cosmocaller + + + cmdf + chemical/x-cmdf + + + cml + chemical/x-cml + + + cmp + application/vnd.yellowriver-custom-menu + + + cmx + image/x-cmx + + + cod + application/vnd.rim.cod + + + com + application/x-msdownload + + + conf + text/plain + + + cpio + application/x-cpio + + + cpp + text/x-c + + + cpt + application/mac-compactpro + + + crd + application/x-mscardfile + + + crl + application/pkix-crl + + + crt + application/x-x509-ca-cert + + + cryptonote + application/vnd.rig.cryptonote + + + csh + application/x-csh + + + csml + chemical/x-csml + + + csp + application/vnd.commonspace + + + css + text/css + + + cst + application/x-director + + + csv + text/csv + + + cu + application/cu-seeme + + + curl + text/vnd.curl + + + cww + application/prs.cww + + + cxt + application/x-director + + + cxx + text/x-c + + + dae + model/vnd.collada+xml + + + daf + application/vnd.mobius.daf + + + dataless + application/vnd.fdsn.seed + + + davmount + application/davmount+xml + + + dcr + application/x-director + + + dcurl + text/vnd.curl.dcurl + + + dd2 + application/vnd.oma.dd2+xml + + + ddd + application/vnd.fujixerox.ddd + + + deb + application/x-debian-package + + + def + text/plain + + + deploy + application/octet-stream + + + der + application/x-x509-ca-cert + + + dfac + application/vnd.dreamfactory + + + dib + image/bmp + + + dic + text/x-c + + + dir + application/x-director + + + dis + application/vnd.mobius.dis + + + dist + application/octet-stream + + + distz + 
application/octet-stream + + + djv + image/vnd.djvu + + + djvu + image/vnd.djvu + + + dll + application/x-msdownload + + + dmg + application/octet-stream + + + dmp + application/vnd.tcpdump.pcap + + + dms + application/octet-stream + + + dna + application/vnd.dna + + + doc + application/msword + + + docm + application/vnd.ms-word.document.macroenabled.12 + + + docx + application/vnd.openxmlformats-officedocument.wordprocessingml.document + + + dot + application/msword + + + dotm + application/vnd.ms-word.template.macroenabled.12 + + + dotx + application/vnd.openxmlformats-officedocument.wordprocessingml.template + + + dp + application/vnd.osgi.dp + + + dpg + application/vnd.dpgraph + + + dra + audio/vnd.dra + + + dsc + text/prs.lines.tag + + + dssc + application/dssc+der + + + dtb + application/x-dtbook+xml + + + dtd + application/xml-dtd + + + dts + audio/vnd.dts + + + dtshd + audio/vnd.dts.hd + + + dump + application/octet-stream + + + dv + video/x-dv + + + dvb + video/vnd.dvb.file + + + dvi + application/x-dvi + + + dwf + model/vnd.dwf + + + dwg + image/vnd.dwg + + + dxf + image/vnd.dxf + + + dxp + application/vnd.spotfire.dxp + + + dxr + application/x-director + + + ecelp4800 + audio/vnd.nuera.ecelp4800 + + + ecelp7470 + audio/vnd.nuera.ecelp7470 + + + ecelp9600 + audio/vnd.nuera.ecelp9600 + + + ecma + application/ecmascript + + + edm + application/vnd.novadigm.edm + + + edx + application/vnd.novadigm.edx + + + efif + application/vnd.picsel + + + ei6 + application/vnd.pg.osasli + + + elc + application/octet-stream + + + eml + message/rfc822 + + + emma + application/emma+xml + + + eol + audio/vnd.digital-winds + + + eot + application/vnd.ms-fontobject + + + eps + application/postscript + + + epub + application/epub+zip + + + es3 + application/vnd.eszigno3+xml + + + esf + application/vnd.epson.esf + + + et3 + application/vnd.eszigno3+xml + + + etx + text/x-setext + + + exe + application/octet-stream + + + exi + application/exi + + + ext + 
application/vnd.novadigm.ext + + + ez + application/andrew-inset + + + ez2 + application/vnd.ezpix-album + + + ez3 + application/vnd.ezpix-package + + + f + text/x-fortran + + + f4v + video/x-f4v + + + f77 + text/x-fortran + + + f90 + text/x-fortran + + + fbs + image/vnd.fastbidsheet + + + fcs + application/vnd.isac.fcs + + + fdf + application/vnd.fdf + + + fe_launch + application/vnd.denovo.fcselayout-link + + + fg5 + application/vnd.fujitsu.oasysgp + + + fgd + application/x-director + + + fh + image/x-freehand + + + fh4 + image/x-freehand + + + fh5 + image/x-freehand + + + fh7 + image/x-freehand + + + fhc + image/x-freehand + + + fig + application/x-xfig + + + flac + audio/flac + + + fli + video/x-fli + + + flo + application/vnd.micrografx.flo + + + flv + video/x-flv + + + flw + application/vnd.kde.kivio + + + flx + text/vnd.fmi.flexstor + + + fly + text/vnd.fly + + + fm + application/vnd.framemaker + + + fnc + application/vnd.frogans.fnc + + + for + text/x-fortran + + + fpx + image/vnd.fpx + + + frame + application/vnd.framemaker + + + fsc + application/vnd.fsc.weblaunch + + + fst + image/vnd.fst + + + ftc + application/vnd.fluxtime.clip + + + fti + application/vnd.anser-web-funds-transfer-initiation + + + fvt + video/vnd.fvt + + + fxp + application/vnd.adobe.fxp + + + fxpl + application/vnd.adobe.fxp + + + fzs + application/vnd.fuzzysheet + + + g2w + application/vnd.geoplan + + + g3 + image/g3fax + + + g3w + application/vnd.geospace + + + gac + application/vnd.groove-account + + + gbr + application/rpki-ghostbusters + + + gdl + model/vnd.gdl + + + geo + application/vnd.dynageo + + + gex + application/vnd.geometry-explorer + + + ggb + application/vnd.geogebra.file + + + ggt + application/vnd.geogebra.tool + + + ghf + application/vnd.groove-help + + + gif + image/gif + + + gim + application/vnd.groove-identity-message + + + gmx + application/vnd.gmx + + + gnumeric + application/x-gnumeric + + + gph + application/vnd.flographit + + + gqf + application/vnd.grafeq + 
+ + gqs + application/vnd.grafeq + + + gram + application/srgs + + + gre + application/vnd.geometry-explorer + + + grv + application/vnd.groove-injector + + + grxml + application/srgs+xml + + + gsf + application/x-font-ghostscript + + + gtar + application/x-gtar + + + gtm + application/vnd.groove-tool-message + + + gtw + model/vnd.gtw + + + gv + text/vnd.graphviz + + + gxt + application/vnd.geonext + + + gz + application/x-gzip + + + h + text/x-c + + + h261 + video/h261 + + + h263 + video/h263 + + + h264 + video/h264 + + + hal + application/vnd.hal+xml + + + hbci + application/vnd.hbci + + + hdf + application/x-hdf + + + hh + text/x-c + + + hlp + application/winhlp + + + hpgl + application/vnd.hp-hpgl + + + hpid + application/vnd.hp-hpid + + + hps + application/vnd.hp-hps + + + hqx + application/mac-binhex40 + + + htc + text/x-component + + + htke + application/vnd.kenameaapp + + + htm + text/html + + + html + text/html + + + hvd + application/vnd.yamaha.hv-dic + + + hvp + application/vnd.yamaha.hv-voice + + + hvs + application/vnd.yamaha.hv-script + + + i2g + application/vnd.intergeo + + + icc + application/vnd.iccprofile + + + ice + x-conference/x-cooltalk + + + icm + application/vnd.iccprofile + + + ico + image/x-icon + + + ics + text/calendar + + + ief + image/ief + + + ifb + text/calendar + + + ifm + application/vnd.shana.informed.formdata + + + iges + model/iges + + + igl + application/vnd.igloader + + + igm + application/vnd.insors.igm + + + igs + model/iges + + + igx + application/vnd.micrografx.igx + + + iif + application/vnd.shana.informed.interchange + + + imp + application/vnd.accpac.simply.imp + + + ims + application/vnd.ms-ims + + + in + text/plain + + + ink + application/inkml+xml + + + inkml + application/inkml+xml + + + iota + application/vnd.astraea-software.iota + + + ipfix + application/ipfix + + + ipk + application/vnd.shana.informed.package + + + irm + application/vnd.ibm.rights-management + + + irp + application/vnd.irepository.package+xml + 
+ + iso + application/octet-stream + + + itp + application/vnd.shana.informed.formtemplate + + + ivp + application/vnd.immervision-ivp + + + ivu + application/vnd.immervision-ivu + + + jad + text/vnd.sun.j2me.app-descriptor + + + jam + application/vnd.jam + + + jar + application/java-archive + + + java + text/x-java-source + + + jisp + application/vnd.jisp + + + jlt + application/vnd.hp-jlyt + + + jnlp + application/x-java-jnlp-file + + + joda + application/vnd.joost.joda-archive + + + jpe + image/jpeg + + + jpeg + image/jpeg + + + jpg + image/jpeg + + + jpgm + video/jpm + + + jpgv + video/jpeg + + + jpm + video/jpm + + + js + application/javascript + + + jsf + text/plain + + + json + application/json + + + jspf + text/plain + + + kar + audio/midi + + + karbon + application/vnd.kde.karbon + + + kfo + application/vnd.kde.kformula + + + kia + application/vnd.kidspiration + + + kml + application/vnd.google-earth.kml+xml + + + kmz + application/vnd.google-earth.kmz + + + kne + application/vnd.kinar + + + knp + application/vnd.kinar + + + kon + application/vnd.kde.kontour + + + kpr + application/vnd.kde.kpresenter + + + kpt + application/vnd.kde.kpresenter + + + ksp + application/vnd.kde.kspread + + + ktr + application/vnd.kahootz + + + ktx + image/ktx + + + ktz + application/vnd.kahootz + + + kwd + application/vnd.kde.kword + + + kwt + application/vnd.kde.kword + + + lasxml + application/vnd.las.las+xml + + + latex + application/x-latex + + + lbd + application/vnd.llamagraphics.life-balance.desktop + + + lbe + application/vnd.llamagraphics.life-balance.exchange+xml + + + les + application/vnd.hhe.lesson-player + + + lha + application/octet-stream + + + link66 + application/vnd.route66.link66+xml + + + list + text/plain + + + list3820 + application/vnd.ibm.modcap + + + listafp + application/vnd.ibm.modcap + + + log + text/plain + + + lostxml + application/lost+xml + + + lrf + application/octet-stream + + + lrm + application/vnd.ms-lrm + + + ltf + 
application/vnd.frogans.ltf + + + lvp + audio/vnd.lucent.voice + + + lwp + application/vnd.lotus-wordpro + + + lzh + application/octet-stream + + + m13 + application/x-msmediaview + + + m14 + application/x-msmediaview + + + m1v + video/mpeg + + + m21 + application/mp21 + + + m2a + audio/mpeg + + + m2v + video/mpeg + + + m3a + audio/mpeg + + + m3u + audio/x-mpegurl + + + m3u8 + application/vnd.apple.mpegurl + + + m4a + audio/mp4 + + + m4b + audio/mp4 + + + m4r + audio/mp4 + + + m4u + video/vnd.mpegurl + + + m4v + video/mp4 + + + ma + application/mathematica + + + mac + image/x-macpaint + + + mads + application/mads+xml + + + mag + application/vnd.ecowin.chart + + + maker + application/vnd.framemaker + + + man + text/troff + + + mathml + application/mathml+xml + + + mb + application/mathematica + + + mbk + application/vnd.mobius.mbk + + + mbox + application/mbox + + + mc1 + application/vnd.medcalcdata + + + mcd + application/vnd.mcd + + + mcurl + text/vnd.curl.mcurl + + + mdb + application/x-msaccess + + + mdi + image/vnd.ms-modi + + + me + text/troff + + + mesh + model/mesh + + + meta4 + application/metalink4+xml + + + mets + application/mets+xml + + + mfm + application/vnd.mfmp + + + mft + application/rpki-manifest + + + mgp + application/vnd.osgeo.mapguide.package + + + mgz + application/vnd.proteus.magazine + + + mid + audio/midi + + + midi + audio/midi + + + mif + application/x-mif + + + mime + message/rfc822 + + + mj2 + video/mj2 + + + mjp2 + video/mj2 + + + mlp + application/vnd.dolby.mlp + + + mmd + application/vnd.chipnuts.karaoke-mmd + + + mmf + application/vnd.smaf + + + mmr + image/vnd.fujixerox.edmics-mmr + + + mny + application/x-msmoney + + + mobi + application/x-mobipocket-ebook + + + mods + application/mods+xml + + + mov + video/quicktime + + + movie + video/x-sgi-movie + + + mp1 + audio/mpeg + + + mp2 + audio/mpeg + + + mp21 + application/mp21 + + + mp2a + audio/mpeg + + + mp3 + audio/mpeg + + + mp4 + video/mp4 + + + mp4a + audio/mp4 + + + mp4s + 
application/mp4 + + + mp4v + video/mp4 + + + mpa + audio/mpeg + + + mpc + application/vnd.mophun.certificate + + + mpe + video/mpeg + + + mpeg + video/mpeg + + + mpega + audio/x-mpeg + + + mpg + video/mpeg + + + mpg4 + video/mp4 + + + mpga + audio/mpeg + + + mpkg + application/vnd.apple.installer+xml + + + mpm + application/vnd.blueice.multipass + + + mpn + application/vnd.mophun.application + + + mpp + application/vnd.ms-project + + + mpt + application/vnd.ms-project + + + mpv2 + video/mpeg2 + + + mpy + application/vnd.ibm.minipay + + + mqy + application/vnd.mobius.mqy + + + mrc + application/marc + + + mrcx + application/marcxml+xml + + + ms + text/troff + + + mscml + application/mediaservercontrol+xml + + + mseed + application/vnd.fdsn.mseed + + + mseq + application/vnd.mseq + + + msf + application/vnd.epson.msf + + + msh + model/mesh + + + msi + application/x-msdownload + + + msl + application/vnd.mobius.msl + + + msty + application/vnd.muvee.style + + + mts + model/vnd.mts + + + mus + application/vnd.musician + + + musicxml + application/vnd.recordare.musicxml+xml + + + mvb + application/x-msmediaview + + + mwf + application/vnd.mfer + + + mxf + application/mxf + + + mxl + application/vnd.recordare.musicxml + + + mxml + application/xv+xml + + + mxs + application/vnd.triscape.mxs + + + mxu + video/vnd.mpegurl + + + n-gage + application/vnd.nokia.n-gage.symbian.install + + + n3 + text/n3 + + + nb + application/mathematica + + + nbp + application/vnd.wolfram.player + + + nc + application/x-netcdf + + + ncx + application/x-dtbncx+xml + + + ngdat + application/vnd.nokia.n-gage.data + + + nlu + application/vnd.neurolanguage.nlu + + + nml + application/vnd.enliven + + + nnd + application/vnd.noblenet-directory + + + nns + application/vnd.noblenet-sealer + + + nnw + application/vnd.noblenet-web + + + npx + image/vnd.net-fpx + + + nsf + application/vnd.lotus-notes + + + oa2 + application/vnd.fujitsu.oasys2 + + + oa3 + application/vnd.fujitsu.oasys3 + + + oas + 
application/vnd.fujitsu.oasys + + + obd + application/x-msbinder + + + oda + application/oda + + + + odb + application/vnd.oasis.opendocument.database + + + + odc + application/vnd.oasis.opendocument.chart + + + + odf + application/vnd.oasis.opendocument.formula + + + odft + application/vnd.oasis.opendocument.formula-template + + + + odg + application/vnd.oasis.opendocument.graphics + + + + odi + application/vnd.oasis.opendocument.image + + + + odm + application/vnd.oasis.opendocument.text-master + + + + odp + application/vnd.oasis.opendocument.presentation + + + + ods + application/vnd.oasis.opendocument.spreadsheet + + + + odt + application/vnd.oasis.opendocument.text + + + oga + audio/ogg + + + ogg + audio/ogg + + + ogv + video/ogg + + + + ogx + application/ogg + + + onepkg + application/onenote + + + onetmp + application/onenote + + + onetoc + application/onenote + + + onetoc2 + application/onenote + + + opf + application/oebps-package+xml + + + oprc + application/vnd.palm + + + org + application/vnd.lotus-organizer + + + osf + application/vnd.yamaha.openscoreformat + + + osfpvg + application/vnd.yamaha.openscoreformat.osfpvg+xml + + + otc + application/vnd.oasis.opendocument.chart-template + + + otf + application/x-font-otf + + + + otg + application/vnd.oasis.opendocument.graphics-template + + + + oth + application/vnd.oasis.opendocument.text-web + + + oti + application/vnd.oasis.opendocument.image-template + + + + otp + application/vnd.oasis.opendocument.presentation-template + + + + ots + application/vnd.oasis.opendocument.spreadsheet-template + + + + ott + application/vnd.oasis.opendocument.text-template + + + oxps + application/oxps + + + oxt + application/vnd.openofficeorg.extension + + + p + text/x-pascal + + + p10 + application/pkcs10 + + + p12 + application/x-pkcs12 + + + p7b + application/x-pkcs7-certificates + + + p7c + application/pkcs7-mime + + + p7m + application/pkcs7-mime + + + p7r + application/x-pkcs7-certreqresp + + + p7s + 
application/pkcs7-signature + + + p8 + application/pkcs8 + + + pas + text/x-pascal + + + paw + application/vnd.pawaafile + + + pbd + application/vnd.powerbuilder6 + + + pbm + image/x-portable-bitmap + + + pcap + application/vnd.tcpdump.pcap + + + pcf + application/x-font-pcf + + + pcl + application/vnd.hp-pcl + + + pclxl + application/vnd.hp-pclxl + + + pct + image/pict + + + pcurl + application/vnd.curl.pcurl + + + pcx + image/x-pcx + + + pdb + application/vnd.palm + + + pdf + application/pdf + + + pfa + application/x-font-type1 + + + pfb + application/x-font-type1 + + + pfm + application/x-font-type1 + + + pfr + application/font-tdpfr + + + pfx + application/x-pkcs12 + + + pgm + image/x-portable-graymap + + + pgn + application/x-chess-pgn + + + pgp + application/pgp-encrypted + + + pic + image/pict + + + pict + image/pict + + + pkg + application/octet-stream + + + pki + application/pkixcmp + + + pkipath + application/pkix-pkipath + + + plb + application/vnd.3gpp.pic-bw-large + + + plc + application/vnd.mobius.plc + + + plf + application/vnd.pocketlearn + + + pls + audio/x-scpls + + + pml + application/vnd.ctc-posml + + + png + image/png + + + pnm + image/x-portable-anymap + + + pnt + image/x-macpaint + + + portpkg + application/vnd.macports.portpkg + + + pot + application/vnd.ms-powerpoint + + + potm + application/vnd.ms-powerpoint.template.macroenabled.12 + + + potx + application/vnd.openxmlformats-officedocument.presentationml.template + + + ppam + application/vnd.ms-powerpoint.addin.macroenabled.12 + + + ppd + application/vnd.cups-ppd + + + ppm + image/x-portable-pixmap + + + pps + application/vnd.ms-powerpoint + + + ppsm + application/vnd.ms-powerpoint.slideshow.macroenabled.12 + + + ppsx + application/vnd.openxmlformats-officedocument.presentationml.slideshow + + + ppt + application/vnd.ms-powerpoint + + + pptm + application/vnd.ms-powerpoint.presentation.macroenabled.12 + + + pptx + application/vnd.openxmlformats-officedocument.presentationml.presentation + 
+ + pqa + application/vnd.palm + + + prc + application/x-mobipocket-ebook + + + pre + application/vnd.lotus-freelance + + + prf + application/pics-rules + + + ps + application/postscript + + + psb + application/vnd.3gpp.pic-bw-small + + + psd + image/vnd.adobe.photoshop + + + psf + application/x-font-linux-psf + + + pskcxml + application/pskc+xml + + + ptid + application/vnd.pvi.ptid1 + + + pub + application/x-mspublisher + + + pvb + application/vnd.3gpp.pic-bw-var + + + pwn + application/vnd.3m.post-it-notes + + + pya + audio/vnd.ms-playready.media.pya + + + pyv + video/vnd.ms-playready.media.pyv + + + qam + application/vnd.epson.quickanime + + + qbo + application/vnd.intu.qbo + + + qfx + application/vnd.intu.qfx + + + qps + application/vnd.publishare-delta-tree + + + qt + video/quicktime + + + qti + image/x-quicktime + + + qtif + image/x-quicktime + + + qwd + application/vnd.quark.quarkxpress + + + qwt + application/vnd.quark.quarkxpress + + + qxb + application/vnd.quark.quarkxpress + + + qxd + application/vnd.quark.quarkxpress + + + qxl + application/vnd.quark.quarkxpress + + + qxt + application/vnd.quark.quarkxpress + + + ra + audio/x-pn-realaudio + + + ram + audio/x-pn-realaudio + + + rar + application/x-rar-compressed + + + ras + image/x-cmu-raster + + + rcprofile + application/vnd.ipunplugged.rcprofile + + + rdf + application/rdf+xml + + + rdz + application/vnd.data-vision.rdz + + + rep + application/vnd.businessobjects + + + res + application/x-dtbresource+xml + + + rgb + image/x-rgb + + + rif + application/reginfo+xml + + + rip + audio/vnd.rip + + + rl + application/resource-lists+xml + + + rlc + image/vnd.fujixerox.edmics-rlc + + + rld + application/resource-lists-diff+xml + + + rm + application/vnd.rn-realmedia + + + rmi + audio/midi + + + rmp + audio/x-pn-realaudio-plugin + + + rms + application/vnd.jcp.javame.midlet-rms + + + rnc + application/relax-ng-compact-syntax + + + roa + application/rpki-roa + + + roff + text/troff + + + rp9 + 
application/vnd.cloanto.rp9 + + + rpss + application/vnd.nokia.radio-presets + + + rpst + application/vnd.nokia.radio-preset + + + rq + application/sparql-query + + + rs + application/rls-services+xml + + + rsd + application/rsd+xml + + + rss + application/rss+xml + + + rtf + application/rtf + + + rtx + text/richtext + + + s + text/x-asm + + + saf + application/vnd.yamaha.smaf-audio + + + sbml + application/sbml+xml + + + sc + application/vnd.ibm.secure-container + + + scd + application/x-msschedule + + + scm + application/vnd.lotus-screencam + + + scq + application/scvp-cv-request + + + scs + application/scvp-cv-response + + + scurl + text/vnd.curl.scurl + + + sda + application/vnd.stardivision.draw + + + sdc + application/vnd.stardivision.calc + + + sdd + application/vnd.stardivision.impress + + + sdkd + application/vnd.solent.sdkm+xml + + + sdkm + application/vnd.solent.sdkm+xml + + + sdp + application/sdp + + + sdw + application/vnd.stardivision.writer + + + see + application/vnd.seemail + + + seed + application/vnd.fdsn.seed + + + sema + application/vnd.sema + + + semd + application/vnd.semd + + + semf + application/vnd.semf + + + ser + application/java-serialized-object + + + setpay + application/set-payment-initiation + + + setreg + application/set-registration-initiation + + + sfd-hdstx + application/vnd.hydrostatix.sof-data + + + sfs + application/vnd.spotfire.sfs + + + sgl + application/vnd.stardivision.writer-global + + + sgm + text/sgml + + + sgml + text/sgml + + + sh + application/x-sh + + + shar + application/x-shar + + + shf + application/shf+xml + + + + sig + application/pgp-signature + + + silo + model/mesh + + + sis + application/vnd.symbian.install + + + sisx + application/vnd.symbian.install + + + sit + application/x-stuffit + + + sitx + application/x-stuffitx + + + skd + application/vnd.koan + + + skm + application/vnd.koan + + + skp + application/vnd.koan + + + skt + application/vnd.koan + + + sldm + 
application/vnd.ms-powerpoint.slide.macroenabled.12 + + + sldx + application/vnd.openxmlformats-officedocument.presentationml.slide + + + slt + application/vnd.epson.salt + + + sm + application/vnd.stepmania.stepchart + + + smf + application/vnd.stardivision.math + + + smi + application/smil+xml + + + smil + application/smil+xml + + + smzip + application/vnd.stepmania.package + + + snd + audio/basic + + + snf + application/x-font-snf + + + so + application/octet-stream + + + spc + application/x-pkcs7-certificates + + + spf + application/vnd.yamaha.smaf-phrase + + + spl + application/x-futuresplash + + + spot + text/vnd.in3d.spot + + + spp + application/scvp-vp-response + + + spq + application/scvp-vp-request + + + spx + audio/ogg + + + src + application/x-wais-source + + + sru + application/sru+xml + + + srx + application/sparql-results+xml + + + sse + application/vnd.kodak-descriptor + + + ssf + application/vnd.epson.ssf + + + ssml + application/ssml+xml + + + st + application/vnd.sailingtracker.track + + + stc + application/vnd.sun.xml.calc.template + + + std + application/vnd.sun.xml.draw.template + + + stf + application/vnd.wt.stf + + + sti + application/vnd.sun.xml.impress.template + + + stk + application/hyperstudio + + + stl + application/vnd.ms-pki.stl + + + str + application/vnd.pg.format + + + stw + application/vnd.sun.xml.writer.template + + + sub + text/vnd.dvb.subtitle + + + sus + application/vnd.sus-calendar + + + susp + application/vnd.sus-calendar + + + sv4cpio + application/x-sv4cpio + + + sv4crc + application/x-sv4crc + + + svc + application/vnd.dvb.service + + + svd + application/vnd.svd + + + svg + image/svg+xml + + + svgz + image/svg+xml + + + swa + application/x-director + + + swf + application/x-shockwave-flash + + + swi + application/vnd.aristanetworks.swi + + + sxc + application/vnd.sun.xml.calc + + + sxd + application/vnd.sun.xml.draw + + + sxg + application/vnd.sun.xml.writer.global + + + sxi + application/vnd.sun.xml.impress + + + sxm + 
application/vnd.sun.xml.math + + + sxw + application/vnd.sun.xml.writer + + + t + text/troff + + + taglet + application/vnd.mynfc + + + tao + application/vnd.tao.intent-module-archive + + + tar + application/x-tar + + + tcap + application/vnd.3gpp2.tcap + + + tcl + application/x-tcl + + + teacher + application/vnd.smart.teacher + + + tei + application/tei+xml + + + teicorpus + application/tei+xml + + + tex + application/x-tex + + + texi + application/x-texinfo + + + texinfo + application/x-texinfo + + + text + text/plain + + + tfi + application/thraud+xml + + + tfm + application/x-tex-tfm + + + thmx + application/vnd.ms-officetheme + + + tif + image/tiff + + + tiff + image/tiff + + + tmo + application/vnd.tmobile-livetv + + + torrent + application/x-bittorrent + + + tpl + application/vnd.groove-tool-template + + + tpt + application/vnd.trid.tpt + + + tr + text/troff + + + tra + application/vnd.trueapp + + + trm + application/x-msterminal + + + tsd + application/timestamped-data + + + tsv + text/tab-separated-values + + + ttc + application/x-font-ttf + + + ttf + application/x-font-ttf + + + ttl + text/turtle + + + twd + application/vnd.simtech-mindmapper + + + twds + application/vnd.simtech-mindmapper + + + txd + application/vnd.genomatix.tuxedo + + + txf + application/vnd.mobius.txf + + + txt + text/plain + + + u32 + application/x-authorware-bin + + + udeb + application/x-debian-package + + + ufd + application/vnd.ufdl + + + ufdl + application/vnd.ufdl + + + ulw + audio/basic + + + umj + application/vnd.umajin + + + unityweb + application/vnd.unity + + + uoml + application/vnd.uoml+xml + + + uri + text/uri-list + + + uris + text/uri-list + + + urls + text/uri-list + + + ustar + application/x-ustar + + + utz + application/vnd.uiq.theme + + + uu + text/x-uuencode + + + uva + audio/vnd.dece.audio + + + uvd + application/vnd.dece.data + + + uvf + application/vnd.dece.data + + + uvg + image/vnd.dece.graphic + + + uvh + video/vnd.dece.hd + + + uvi + 
image/vnd.dece.graphic + + + uvm + video/vnd.dece.mobile + + + uvp + video/vnd.dece.pd + + + uvs + video/vnd.dece.sd + + + uvt + application/vnd.dece.ttml+xml + + + uvu + video/vnd.uvvu.mp4 + + + uvv + video/vnd.dece.video + + + uvva + audio/vnd.dece.audio + + + uvvd + application/vnd.dece.data + + + uvvf + application/vnd.dece.data + + + uvvg + image/vnd.dece.graphic + + + uvvh + video/vnd.dece.hd + + + uvvi + image/vnd.dece.graphic + + + uvvm + video/vnd.dece.mobile + + + uvvp + video/vnd.dece.pd + + + uvvs + video/vnd.dece.sd + + + uvvt + application/vnd.dece.ttml+xml + + + uvvu + video/vnd.uvvu.mp4 + + + uvvv + video/vnd.dece.video + + + uvvx + application/vnd.dece.unspecified + + + uvvz + application/vnd.dece.zip + + + uvx + application/vnd.dece.unspecified + + + uvz + application/vnd.dece.zip + + + vcard + text/vcard + + + vcd + application/x-cdlink + + + vcf + text/x-vcard + + + vcg + application/vnd.groove-vcard + + + vcs + text/x-vcalendar + + + vcx + application/vnd.vcx + + + vis + application/vnd.visionary + + + viv + video/vnd.vivo + + + vor + application/vnd.stardivision.writer + + + vox + application/x-authorware-bin + + + vrml + model/vrml + + + vsd + application/vnd.visio + + + vsf + application/vnd.vsf + + + vss + application/vnd.visio + + + vst + application/vnd.visio + + + vsw + application/vnd.visio + + + vtu + model/vnd.vtu + + + vxml + application/voicexml+xml + + + w3d + application/x-director + + + wad + application/x-doom + + + wav + audio/x-wav + + + wax + audio/x-ms-wax + + + + wbmp + image/vnd.wap.wbmp + + + wbs + application/vnd.criticaltools.wbs+xml + + + wbxml + application/vnd.wap.wbxml + + + wcm + application/vnd.ms-works + + + wdb + application/vnd.ms-works + + + weba + audio/webm + + + webm + video/webm + + + webp + image/webp + + + wg + application/vnd.pmi.widget + + + wgt + application/widget + + + wks + application/vnd.ms-works + + + wm + video/x-ms-wm + + + wma + audio/x-ms-wma + + + wmd + application/x-ms-wmd + + + wmf + 
application/x-msmetafile + + + + wml + text/vnd.wap.wml + + + + wmlc + application/vnd.wap.wmlc + + + + wmls + text/vnd.wap.wmlscript + + + + wmlsc + application/vnd.wap.wmlscriptc + + + wmv + video/x-ms-wmv + + + wmx + video/x-ms-wmx + + + wmz + application/x-ms-wmz + + + woff + application/x-font-woff + + + wpd + application/vnd.wordperfect + + + wpl + application/vnd.ms-wpl + + + wps + application/vnd.ms-works + + + wqd + application/vnd.wqd + + + wri + application/x-mswrite + + + wrl + model/vrml + + + wsdl + application/wsdl+xml + + + wspolicy + application/wspolicy+xml + + + wtb + application/vnd.webturbo + + + wvx + video/x-ms-wvx + + + x32 + application/x-authorware-bin + + + x3d + application/vnd.hzn-3d-crossword + + + xap + application/x-silverlight-app + + + xar + application/vnd.xara + + + xbap + application/x-ms-xbap + + + xbd + application/vnd.fujixerox.docuworks.binder + + + xbm + image/x-xbitmap + + + xdf + application/xcap-diff+xml + + + xdm + application/vnd.syncml.dm+xml + + + xdp + application/vnd.adobe.xdp+xml + + + xdssc + application/dssc+xml + + + xdw + application/vnd.fujixerox.docuworks + + + xenc + application/xenc+xml + + + xer + application/patch-ops-error+xml + + + xfdf + application/vnd.adobe.xfdf + + + xfdl + application/vnd.xfdl + + + xht + application/xhtml+xml + + + xhtml + application/xhtml+xml + + + xhvml + application/xv+xml + + + xif + image/vnd.xiff + + + xla + application/vnd.ms-excel + + + xlam + application/vnd.ms-excel.addin.macroenabled.12 + + + xlc + application/vnd.ms-excel + + + xlm + application/vnd.ms-excel + + + xls + application/vnd.ms-excel + + + xlsb + application/vnd.ms-excel.sheet.binary.macroenabled.12 + + + xlsm + application/vnd.ms-excel.sheet.macroenabled.12 + + + xlsx + application/vnd.openxmlformats-officedocument.spreadsheetml.sheet + + + xlt + application/vnd.ms-excel + + + xltm + application/vnd.ms-excel.template.macroenabled.12 + + + xltx + 
application/vnd.openxmlformats-officedocument.spreadsheetml.template + + + xlw + application/vnd.ms-excel + + + xml + application/xml + + + xo + application/vnd.olpc-sugar + + + xop + application/xop+xml + + + xpi + application/x-xpinstall + + + xpm + image/x-xpixmap + + + xpr + application/vnd.is-xpr + + + xps + application/vnd.ms-xpsdocument + + + xpw + application/vnd.intercon.formnet + + + xpx + application/vnd.intercon.formnet + + + xsl + application/xml + + + xslt + application/xslt+xml + + + xsm + application/vnd.syncml+xml + + + xspf + application/xspf+xml + + + xul + application/vnd.mozilla.xul+xml + + + xvm + application/xv+xml + + + xvml + application/xv+xml + + + xwd + image/x-xwindowdump + + + xyz + chemical/x-xyz + + + yang + application/yang + + + yin + application/yin+xml + + + z + application/x-compress + + + Z + application/x-compress + + + zaz + application/vnd.zzazz.deck+xml + + + zip + application/zip + + + zir + application/vnd.zul + + + zirz + application/vnd.zul + + + zmm + application/vnd.handheld-entertainment+xml + + + + + + + + + + + + + + + + + + index.html + index.htm + index.jsp + + + diff --git a/tomcat-multiple-instances/handlers/main.yml b/tomcat-multiple-instances/handlers/main.yml new file mode 100644 index 00000000..2f721cb5 --- /dev/null +++ b/tomcat-multiple-instances/handlers/main.yml @@ -0,0 +1,23 @@ +--- +- name: tomcat restart instances with changed configs + service: name=tomcat-instance-'{{ item.item.http_port }}' state=restarted sleep=20 + with_items: restart_needed.results + when: item.changed + +- name: tomcat restart instances with changed jmx config + service: name=tomcat-instance-'{{ item.item.http_port }}' state=restarted sleep=20 + with_items: jmx_restart_needed.results + when: item.changed + +- name: tomcat instances restart + service: name=tomcat-instance-'{{ item.http_port }}' state=restarted sleep=20 + with_items: tomcat_m_instances + +- name: enable tomcat instances + service: name=tomcat-instance-'{{ 
item.http_port }}' state=started enabled=yes sleep 20 + with_items: tomcat_m_instances + +- name: disable tomcat instances + service: name=tomcat-instance-'{{ item.http_port }}' state=stopped enabled=no sleep 20 + with_items: tomcat_m_instances + diff --git a/tomcat-multiple-instances/meta/main.yml b/tomcat-multiple-instances/meta/main.yml new file mode 100644 index 00000000..3ccd3739 --- /dev/null +++ b/tomcat-multiple-instances/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - role: '../../library/tomcat' diff --git a/tomcat-multiple-instances/tasks/main.yml b/tomcat-multiple-instances/tasks/main.yml new file mode 100644 index 00000000..6de1ccba --- /dev/null +++ b/tomcat-multiple-instances/tasks/main.yml @@ -0,0 +1,186 @@ +--- +# +# Note: the library role 'tomcat' is a dependency +# +- name: disable the tomcat main instance + service: name=tomcat'{{ tomcat_version }}' state=stopped enabled=no + when: not tomcat_service_enabled + tags: + - tomcat + - tomcat_instances + +- name: Create a tomcat user for each instance if needed + user: name={{ item.user }} home={{ item.user_home }} createhome=false shell={{ item.user_shell }} + with_items: tomcat_m_instances + when: + - not tomcat_m_use_default_user + - item.user != "tomcat{{ tomcat_version }}" + - item.user_shell is defined + register: tomcat_first_install + tags: + - tomcat + - tomcat_instances + +- name: Create a tomcat user if needed + user: name={{ tomcat_m_default_user }} home={{ tomcat_m_instances_base_path }} createhome=false shell={{ tomcat_m_default_user_shell }} + when: + - tomcat_m_use_default_user + - tomcat_m_default_user != "tomcat{{ tomcat_version }}" + register: tomcat_first_install + tags: + - tomcat + - tomcat_instances + +- name: Create the instances directory trees + file: dest={{ item.0.instance_path }}/{{ item[1] }} owner={{ item.0.user }} group={{ item.0.user }} mode=0755 state=directory + with_nested: + - tomcat_m_instances + - [ 'common/classes', 'conf/Catalina/localhost', 
'conf/policy.d', 'lib', 'server/classes', 'shared/classes', 'webapps' ] + register: tomcat_first_install + tags: + - tomcat + - tomcat_instances + +- name: Create the instances log dirs + file: dest={{ tomcat_m_instances_logdir_base }}/{{ item.http_port }} owner={{ item.user }} group={{ item.user }} mode=0755 state=directory + with_items: tomcat_m_instances + register: tomcat_first_install + tags: + - tomcat + - tomcat_instances + +- name: Create the instances work dirs + file: dest={{ tomcat_m_cache_base }}/{{ item.http_port }} owner={{ item.user }} group={{ item.user }} mode=0755 state=directory + with_items: tomcat_m_instances + register: tomcat_first_install + tags: + - tomcat + - tomcat_instances + +- name: Create links to work dir inside the instances directory tree + file: src={{ tomcat_m_cache_base }}/{{ item.http_port }} dest={{ item.instance_path }}/work state=link + with_items: tomcat_m_instances + register: tomcat_first_install + tags: + - tomcat + - tomcat_instances + +- name: Create links to log dir inside the instances directory tree + file: src={{ tomcat_m_instances_logdir_base }}/{{ item.http_port }} dest={{ item.instance_path }}/logs state=link + with_items: tomcat_m_instances + register: tomcat_first_install + tags: + - tomcat + - tomcat_instances + +- name: Populate the instances conf directory + copy: src={{ item[1] }} dest={{ item.0.instance_path }}/conf/{{ item[1] }} owner={{ item.0.user }} group={{ item.0.user }} mode=0640 + with_nested: + - tomcat_m_instances + - [ 'catalina.properties', 'context.xml', 'tomcat-users.xml', 'web.xml' ] + register: restart_needed + notify: + - tomcat restart instances with changed configs + tags: + - tomcat + - tomcat_instances + +- name: Populate the instances conf/policy.d directory + copy: src=policy.d/{{ item[1] }} dest={{ item.0.instance_path }}/conf/policy.d/{{ item[1] }} owner={{ item.0.user }} group={{ item.0.user }} mode=0640 + with_nested: + - tomcat_m_instances + - [ '01system.policy', 
'02debian.policy', '03catalina.policy', '04webapps.policy', '50local.policy' ] + register: restart_needed + notify: + - tomcat restart instances with changed configs + tags: + - tomcat + - tomcat_instances + +- name: Install logging.properties if we do not use log4j for the tomcat logging + copy: src={{ item[1] }} dest={{ item.0.instance_path }}/conf/{{ item[1] }} owner={{ item.0.user }} group={{ item.0.user }} mode=0640 + with_nested: + - tomcat_m_instances + - [ 'logging.properties' ] + when: + - tomcat_use_log4j is defined + - not tomcat_use_log4j + register: restart_needed + notify: + - tomcat restart instances with changed configs + tags: + - tomcat + - tomcat_instances + +- name: Install the server.xml conf file + template: src=tomcat-server.xml.j2 dest={{ item.instance_path }}/conf/server.xml owner={{ item.user }} group={{ item.user }} mode=0640 + with_items: tomcat_m_instances + register: restart_needed + notify: + - tomcat restart instances with changed configs + tags: + - tomcat + - tomcat_instances + +- name: Install the instances startup scripts + template: src=tomcat-instance.init.j2 dest=/etc/init.d/tomcat-instance-{{ item.http_port }} mode=0755 owner=root group=root + with_items: tomcat_m_instances + tags: + - tomcat + - tomcat_instances + +- name: Install the tomcat instances default file + template: src=tomcat-default.j2 dest=/etc/default/tomcat-instance-{{ item.http_port }} mode=0640 owner=root group={{ item.user }} + with_items: tomcat_m_instances + register: restart_needed + notify: + - tomcat restart instances with changed configs + tags: + - tomcat + - tomcat_instances + +- name: Install a logrotate entry for the access log file + template: src=tomcat.logrotate.j2 dest=/etc/logrotate.d/tomcat_instance-{{ item.http_port }} owner=root group=root mode=0644 + with_items: tomcat_m_instances + tags: + - tomcat + - tomcat_instances + +- name: Install the jmx authorization file + template: src=jmxremote.passwd.j2 dest={{ item.instance_path 
}}/conf/jmxremote.passwd owner={{ item.user }} group={{ item.user }} mode=0600 + with_items: tomcat_m_instances + when: + - item.jmx_enabled is defined + - item.jmx_auth_enabled is defined + - item.jmx_enabled + - item.jmx_auth_enabled + register: jmx_restart_needed + notify: + - tomcat restart instances with changed jmx config + tags: + - tomcat + - tomcat_instances + +- name: Install the jmx role file + copy: src=jmxremote.access dest={{ item.instance_path }}/conf/jmxremote.access owner={{ item.user }} group={{ item.user }} mode=0644 + with_items: tomcat_m_instances + when: + - item.jmx_enabled is defined + - item.jmx_auth_enabled is defined + - item.jmx_enabled + - item.jmx_auth_enabled + register: jmx restart_needed + notify: + - tomcat restart instances with changed jmx config + tags: + - tomcat + - tomcat_instances + +- name: Start and enable all the tomcat instances + service: name=tomcat-instance-'{{ item.http_port }}' state=started sleep=20 enabled=yes + with_items: tomcat_m_instances + when: tomcat_first_install.changed + tags: + - tomcat + - tomcat_instances + diff --git a/tomcat-multiple-instances/templates/jmxremote.passwd.j2 b/tomcat-multiple-instances/templates/jmxremote.passwd.j2 new file mode 100644 index 00000000..c064d4d6 --- /dev/null +++ b/tomcat-multiple-instances/templates/jmxremote.passwd.j2 @@ -0,0 +1,2 @@ +monitorRole {{ item.jmx_monitorpass }} +controlRole {{ item.jmx_controlpass }} diff --git a/tomcat-multiple-instances/templates/tomcat-default.j2 b/tomcat-multiple-instances/templates/tomcat-default.j2 new file mode 100644 index 00000000..f820c6c0 --- /dev/null +++ b/tomcat-multiple-instances/templates/tomcat-default.j2 @@ -0,0 +1,32 @@ +TOMCAT_USER={{ item.user }} +TOMCAT_GROUP={{ item.user }} +JAVA_HOME={{ item.java_home }} +JAVA_OPTS="-server -Djava.awt.headless=true" +{% if item.java_opts is defined %} +JAVA_OPTS="{{ item.java_opts }} $JAVA_OPTS" +{% endif %} +{% if item.java_gc_opts is defined %} +JAVA_OPTS="{{ item.java_gc_opts }} 
$JAVA_OPTS" +{% endif %} +{% if item.other_java_opts is defined %} +JAVA_OPTS="${JAVA_OPTS} {{ item.other_java_opts }}" +{% endif %} +{% if item.jmx_enabled is defined and item.jmx_enabled %} +JAVA_OPTS="${JAVA_OPTS} -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.port={{ item.jmx_port }} -Dcom.sun.management.jmxremote.ssl=false" +{% if item.jmx_auth_enabled is defined and item.jmx_auth_enabled %} +JAVA_OPTS="${JAVA_OPTS} -Dcom.sun.management.jmxremote.password.file={{ item.jmx_auth_dir }}/jmxremote.password -Dcom.sun.management.jmxremote.access.file={{ item.jmx_auth_dir }}/jmxremote.access" +{% else %} +JAVA_OPTS="${JAVA_OPTS} -Dcom.sun.management.jmxremote.authenticate=false" +{% endif %} +{% endif %} +{% if item.remote_debugging is defined and item.remote_debugging %} +# To enable remote debugging uncomment the following line. +# You will then be able to use a java debugger on port {{ item.remote_debugging_port }}. +JAVA_OPTS="${JAVA_OPTS} -Xdebug -Xrunjdwp:transport=dt_socket,address={{ item.remote_debugging_port }},server=y,suspend=n" +{% endif %} +# WARNING: This directory will be destroyed and recreated at every startup ! +JVM_TMP={{ item.instance_path }}/tmp +# Additional options not managed by the provisioning tools +if [ -f /etc/default/tomcat-instance-{{ item.http_port }}.local ] ; then + . /etc/default/tomcat-instance-{{ item.http_port }}.local +fi diff --git a/tomcat-multiple-instances/templates/tomcat-instance.init.j2 b/tomcat-multiple-instances/templates/tomcat-instance.init.j2 new file mode 100755 index 00000000..99f26c53 --- /dev/null +++ b/tomcat-multiple-instances/templates/tomcat-instance.init.j2 @@ -0,0 +1,296 @@ +#!/bin/sh +# +# /etc/init.d/tomcat-instance-{{ item.http_port }} -- startup script for the Tomcat 7 {{ item.user }} servlet engine on port {{ item.http_port }} +# +# Written by Miquel van Smoorenburg . +# Modified for Debian GNU/Linux by Ian Murdock . +# Modified for Tomcat by Stefan Gybas . 
+# Modified for Tomcat6 by Thierry Carrez . +# Modified for Tomcat7 by Ernesto Hernandez-Novich . +# Additional improvements by Jason Brittain . +# +### BEGIN INIT INFO +# Provides: tomcat-instance-8280 +# Required-Start: $local_fs $remote_fs $network +# Required-Stop: $local_fs $remote_fs $network +# Should-Start: $named +# Should-Stop: $named +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Start Tomcat. +# Description: Start the Tomcat servlet engine. +### END INIT INFO + +set -e + +PATH=/bin:/usr/bin:/sbin:/usr/sbin +NAME=tomcat-instance-{{ item.http_port }} +DESC="Tomcat servlet engine" +DEFAULT=/etc/default/$NAME +JVM_TMP=/var/tmp/$NAME-tmp + +if [ `id -u` -ne 0 ]; then + echo "You need root privileges to run this script" + exit 1 +fi + +# Make sure tomcat is started with system locale +if [ -r /etc/default/locale ]; then + . /etc/default/locale + export LANG +fi + +. /lib/lsb/init-functions + +if [ -r /etc/default/rcS ]; then + . /etc/default/rcS +fi + + +# The following variables can be overwritten in $DEFAULT + +# Run Tomcat 7 as this user ID and group ID +TOMCAT7_USER={{ item.user }} +TOMCAT7_GROUP={{ item.user }} + +# this is a work-around until there is a suitable runtime replacement +# for dpkg-architecture for arch:all packages +# this function sets the variable OPENJDKS +find_openjdks() +{ + for jvmdir in /usr/lib/jvm/java-7-openjdk-* + do + if [ -d "${jvmdir}" -a "${jvmdir}" != "/usr/lib/jvm/java-7-openjdk-common" ] + then + OPENJDKS=$jvmdir + fi + done + for jvmdir in /usr/lib/jvm/java-6-openjdk-* + do + if [ -d "${jvmdir}" -a "${jvmdir}" != "/usr/lib/jvm/java-6-openjdk-common" ] + then + OPENJDKS="${OPENJDKS} ${jvmdir}" + fi + done +} + +OPENJDKS="" +find_openjdks +# The first existing directory is used for JAVA_HOME (if JAVA_HOME is not +# defined in $DEFAULT) +JDK_DIRS="/usr/lib/jvm/default-java ${OPENJDKS} /usr/lib/jvm/java-6-openjdk /usr/lib/jvm/java-6-sun" + +# Look for the right JVM to use +for jdir in $JDK_DIRS; do + 
if [ -r "$jdir/bin/java" -a -z "${JAVA_HOME}" ]; then + JAVA_HOME="$jdir" + fi +done +export JAVA_HOME + +# Directory where the Tomcat binary distribution resides +CATALINA_HOME=/usr/share/tomcat7 + +# Directory for per-instance configuration files and webapps +CATALINA_BASE={{ item.instance_path }} + +# Use the Java security manager? (yes/no) +TOMCAT7_SECURITY=no + +# Default Java options +# Set java.awt.headless=true if JAVA_OPTS is not set so the +# Xalan XSL transformer can work without X11 display on JDK 1.4+ +# It also looks like the default heap size of 64M is not enough for most cases +# so the maximum heap size is set to 128M +if [ -z "$JAVA_OPTS" ]; then + JAVA_OPTS="-Djava.awt.headless=true -Xmx512M" +fi + +# End of variables that can be overwritten in $DEFAULT + +# overwrite settings from default file +if [ -f "$DEFAULT" ]; then + . "$DEFAULT" +fi + +if [ ! -f "$CATALINA_HOME/bin/bootstrap.jar" ]; then + log_failure_msg "$NAME is not installed" + exit 1 +fi + +POLICY_CACHE="$CATALINA_BASE/work/catalina.policy" + +if [ -z "$CATALINA_TMPDIR" ]; then + CATALINA_TMPDIR="$JVM_TMP" +fi + +# Set the JSP compiler if set in the tomcat7.default file +if [ -n "$JSP_COMPILER" ]; then + JAVA_OPTS="$JAVA_OPTS -Dbuild.compiler=\"$JSP_COMPILER\"" +fi + +SECURITY="" +if [ "$TOMCAT7_SECURITY" = "yes" ]; then + SECURITY="-security" +fi + +# Define other required variables +CATALINA_PID="/var/run/$NAME.pid" +CATALINA_SH="$CATALINA_HOME/bin/catalina.sh" + +# Look for Java Secure Sockets Extension (JSSE) JARs +if [ -z "${JSSE_HOME}" -a -r "${JAVA_HOME}/jre/lib/jsse.jar" ]; then + JSSE_HOME="${JAVA_HOME}/jre/" +fi + +catalina_sh() { + # Escape any double quotes in the value of JAVA_OPTS + JAVA_OPTS="$(echo $JAVA_OPTS | sed 's/\"/\\\"/g')" + + AUTHBIND_COMMAND="" + if [ "$AUTHBIND" = "yes" -a "$1" = "start" ]; then + JAVA_OPTS="$JAVA_OPTS -Djava.net.preferIPv4Stack=true" + AUTHBIND_COMMAND="/usr/bin/authbind --deep /bin/bash -c " + fi + + # Define the command to run Tomcat's 
catalina.sh as a daemon + # set -a tells sh to export assigned variables to spawned shells. + TOMCAT_SH="set -a; JAVA_HOME=\"$JAVA_HOME\"; source \"$DEFAULT\"; \ + CATALINA_HOME=\"$CATALINA_HOME\"; \ + CATALINA_BASE=\"$CATALINA_BASE\"; \ + JAVA_OPTS=\"$JAVA_OPTS\"; \ + CATALINA_PID=\"$CATALINA_PID\"; \ + CATALINA_TMPDIR=\"$CATALINA_TMPDIR\"; \ + LANG=\"$LANG\"; JSSE_HOME=\"$JSSE_HOME\"; \ + cd \"$CATALINA_BASE\"; \ + \"$CATALINA_SH\" $@" + + if [ "$AUTHBIND" = "yes" -a "$1" = "start" ]; then + TOMCAT_SH="'$TOMCAT_SH'" + fi + + # Run the catalina.sh script as a daemon + set +e + touch "$CATALINA_PID" "$CATALINA_BASE"/logs/catalina.out + chown $TOMCAT7_USER "$CATALINA_PID" "$CATALINA_BASE"/logs/catalina.out + start-stop-daemon --start -b -u "$TOMCAT7_USER" -g "$TOMCAT7_GROUP" \ + -c "$TOMCAT7_USER" -d "$CATALINA_TMPDIR" -p "$CATALINA_PID" \ + -x /bin/bash -- -c "$AUTHBIND_COMMAND $TOMCAT_SH" + status="$?" + set +a -e + return $status +} + +case "$1" in + start) + if [ -z "$JAVA_HOME" ]; then + log_failure_msg "no JDK found - please set JAVA_HOME" + exit 1 + fi + + if [ ! 
-d "$CATALINA_BASE/conf" ]; then + log_failure_msg "invalid CATALINA_BASE: $CATALINA_BASE" + exit 1 + fi + + log_daemon_msg "Starting $DESC" "$NAME" + if start-stop-daemon --test --start --pidfile "$CATALINA_PID" \ + --user $TOMCAT7_USER --exec "$JAVA_HOME/bin/java" \ + >/dev/null; then + + # Regenerate POLICY_CACHE file + umask 022 + echo "// AUTO-GENERATED FILE from {{ item.instance_path }}/conf/policy.d/" \ + > "$POLICY_CACHE" + echo "" >> "$POLICY_CACHE" + cat $CATALINA_BASE/conf/policy.d/*.policy \ + >> "$POLICY_CACHE" + + # Remove / recreate JVM_TMP directory + rm -rf "$JVM_TMP" + mkdir -p "$JVM_TMP" || { + log_failure_msg "could not create JVM temporary directory" + exit 1 + } + chown $TOMCAT7_USER "$JVM_TMP" + + catalina_sh start $SECURITY + sleep 5 + if start-stop-daemon --test --start --pidfile "$CATALINA_PID" \ + --user $TOMCAT7_USER --exec "$JAVA_HOME/bin/java" \ + >/dev/null; then + if [ -f "$CATALINA_PID" ]; then + rm -f "$CATALINA_PID" + fi + log_end_msg 1 + else + log_end_msg 0 + fi + else + log_progress_msg "(already running)" + log_end_msg 0 + fi + ;; + stop) + log_daemon_msg "Stopping $DESC" "$NAME" + + set +e + if [ -f "$CATALINA_PID" ]; then + start-stop-daemon --stop --pidfile "$CATALINA_PID" \ + --user "$TOMCAT7_USER" \ + --retry=TERM/20/KILL/5 >/dev/null + if [ $? -eq 1 ]; then + log_progress_msg "$DESC is not running but pid file exists, cleaning up" + elif [ $? -eq 3 ]; then + PID="`cat $CATALINA_PID`" + log_failure_msg "Failed to stop $NAME (pid $PID)" + exit 1 + fi + rm -f "$CATALINA_PID" + rm -rf "$JVM_TMP" + else + log_progress_msg "(not running)" + fi + log_end_msg 0 + set -e + ;; + status) + set +e + start-stop-daemon --test --start --pidfile "$CATALINA_PID" \ + --user $TOMCAT7_USER --exec "$JAVA_HOME/bin/java" \ + >/dev/null 2>&1 + if [ "$?" = "0" ]; then + + if [ -f "$CATALINA_PID" ]; then + log_success_msg "$DESC is not running, but pid file exists." + exit 1 + else + log_success_msg "$DESC is not running." 
+ exit 3 + fi + else + log_success_msg "$DESC is running with pid `cat $CATALINA_PID`" + fi + set -e + ;; + restart|force-reload) + if [ -f "$CATALINA_PID" ]; then + $0 stop + sleep 1 + fi + $0 start + ;; + try-restart) + if start-stop-daemon --test --start --pidfile "$CATALINA_PID" \ + --user $TOMCAT7_USER --exec "$JAVA_HOME/bin/java" \ + >/dev/null; then + $0 start + fi + ;; + *) + log_success_msg "Usage: $0 {start|stop|restart|try-restart|force-reload|status}" + exit 1 + ;; +esac + +exit 0 diff --git a/tomcat-multiple-instances/templates/tomcat-server.xml.j2 b/tomcat-multiple-instances/templates/tomcat-server.xml.j2 new file mode 100644 index 00000000..bb9485ee --- /dev/null +++ b/tomcat-multiple-instances/templates/tomcat-server.xml.j2 @@ -0,0 +1,95 @@ + + + + + + + + + + + + + + + + +{% if item.http_enabled %} + +{% endif %} + + +{% if item.http_enabled %} + + +{% endif %} + +{% if item.ajp_enabled %} + + +{% endif %} + + + + + + + + + + + +{% if item.access_log_enabled %} + + +{% endif %} + + + + diff --git a/tomcat-multiple-instances/templates/tomcat.logrotate.j2 b/tomcat-multiple-instances/templates/tomcat.logrotate.j2 new file mode 100644 index 00000000..64877dea --- /dev/null +++ b/tomcat-multiple-instances/templates/tomcat.logrotate.j2 @@ -0,0 +1,17 @@ +{{ tomcat_m_instances_logdir_base }}/{{ item.http_port }}/catalina.out { + copytruncate + {{ item.log_rotation_freq }} + rotate {{ item.log_retain }} + compress + missingok + create 640 {{ item.user }} adm +} + +{{ tomcat_m_instances_logdir_base }}/{{ item.http_port }}/localhost_access.log { + copytruncate + {{ item.log_rotation_freq }} + rotate {{ item.log_retain }} + compress + missingok + create 640 {{ item.user }} adm +} diff --git a/tomcat/defaults/main.yml b/tomcat/defaults/main.yml new file mode 100644 index 00000000..0c5fec7f --- /dev/null +++ b/tomcat/defaults/main.yml @@ -0,0 +1,94 @@ +--- +tomcat_version: 7 +tomcat_pkg_state: installed +tomcat_service_enabled: True +tomcat_user: tomcat7 
+tomcat_max_threads: 200 +tomcat_min_heap_size: 2048m +tomcat_heap_size: '{{ tomcat_min_heap_size }}' +tomcat_permgen_size: 512m +tomcat_java_opts: "-Xms{{ tomcat_min_heap_size }} -Xmx{{ tomcat_heap_size }} -XX:MaxPermSize={{ tomcat_permgen_size }}" +tomcat_java_gc_opts: "-XX:+UseConcMarkSweepGC" +#tomcat_other_java_opts: "-Djsse.enableSNIExtension=false" +tomcat_other_java_opts: "" +tomcat_install_default_conf: True +tomcat_load_additional_default_conf: True +tomcat_http_enabled: True +tomcat_http_port: 8080 +tomcat_http_address: 0.0.0.0 +tomcat_autodeploy: False +tomcat_ajp_enabled: False +tomcat_ajp_port: 8009 +tomcat_ajp_address: 127.0.0.1 +# There is a bug that kills tomcat after 50 days if the shutdown port is enabled +# Disable the shutdown port by default +#tomcat_shutdown_port: 8005 +tomcat_shutdown_port: -1 +tomcat_shutdown_pwd: "{{ lookup('password', '/tmp/passwordfile chars=ascii_letters,digits,hexdigits,punctuation') }}" +tomcat_restart_timeout: 300 +tomcat_catalina_home_dir: '/usr/share/tomcat{{ tomcat_version }}' +tomcat_catalina_base_dir: '/var/lib/tomcat{{ tomcat_version }}' +tomcat_conf_dir: '/etc/tomcat{{ tomcat_version }}' +tomcat_webapps_dir: '{{ tomcat_catalina_base_dir }}/webapps' +tomcat_tmp_dir: '{{ tomcat_catalina_base_dir }}/tmp/tomcat' + +# JMX and debugging +tomcat_enable_remote_debugging: False +tomcat_remote_debugging_port: 8000 +tomcat_jmx_enabled: False +tomcat_jmx_auth_enabled: False +tomcat_jmx_port: 8082 +tomcat_jmx_auth_dir: '{{ tomcat_conf_dir }}' +tomcat_jmx_use_ssl: False +# The following work with jdk >= 7.0.25 only +tomcat_jmx_disable_additional_ports: True +tomcat_jmx_localhost_only: False +# tomcat_jmx_monitorpass: define_in_a_vault_file +# tomcat_jmx_controlpass: define_in_a_vault_file + +# tomcat logging +tomcat_logdir: '/var/log/tomcat{{ tomcat_version }}' +tomcat_use_log4j: True +tomcat_install_the_log4j_properties: True +tomcat_retain_old_logs: 30 +tomcat_log_rotation_threshold: "ALL" +tomcat_log_max_file_size: 
"100MB" +tomcat_log_level: INFO +tomcat_log_logger: CATALINA +tomcat_access_log_enabled: True +tomcat_access_log_rotation_freq: "daily" +# +# Define them if you want to send all the logs to an ELK installation +tomcat_send_to_logstash: False +tomcat_logstash_collector_host: logstash +tomcat_logstash_collector_socketappender_port: 4560 +tomcat_logstash_collector_socketappender_reconndelay: 10000 +# Set to LOGSTASH only if you do not want local logs +tomcat_logstash_logger: CATALINA, LOGSTASH + + +#tomcat_access_log_file_name: localhost_access.log +# +# Administrative interface +tomcat_install_admin: False +tomcat_manager_gui_user_enabled: True +tomcat_manager_gui_user: guiadmin +tomcat_manager_gui_r: "manager-gui" +#tomcat_manager_gui_pwd: *See the vault file* +tomcat_manager_script_user_enabled: False +tomcat_manager_script_user: scriptadmin +tomcat_manager_script_r: "manager-script" +#tomcat_manager_script_pwd: *See the vault file* +tomcat_manager_jmx_user_enabled: False +tomcat_manager_jmx_user: jmxadmin +tomcat_manager_jmx_r: "manager-jmx" +#tomcat_manager_jmx_pwd: *See the vault file* +tomcat_manager_status_user_enabled: False +tomcat_manager_status_user: statusadmin +tomcat_manager_status_r: "manager-status" +#tomcat_manager_status_pwd: *See the vault file* +# +tomcat_install_jdbc: False +tomcat_install_pg_jdbc: '{{ tomcat_install_jdbc }}' +# Not used yet +tomcat_install_mysql_jdbc: False diff --git a/tomcat/files/jmxremote.access b/tomcat/files/jmxremote.access new file mode 100644 index 00000000..c5aab07e --- /dev/null +++ b/tomcat/files/jmxremote.access @@ -0,0 +1,2 @@ +monitorRole readonly +controlRole readwrite diff --git a/tomcat/files/logging.properties b/tomcat/files/logging.properties new file mode 100644 index 00000000..6eeb1814 --- /dev/null +++ b/tomcat/files/logging.properties @@ -0,0 +1,49 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +handlers = 1catalina.org.apache.juli.FileHandler, 2localhost.org.apache.juli.FileHandler, java.util.logging.ConsoleHandler + +.handlers = 1catalina.org.apache.juli.FileHandler, java.util.logging.ConsoleHandler + +############################################################ +# Handler specific properties. +# Describes specific configuration info for Handlers. +############################################################ + +1catalina.org.apache.juli.FileHandler.level = FINE +1catalina.org.apache.juli.FileHandler.directory = ${catalina.base}/logs +1catalina.org.apache.juli.FileHandler.prefix = catalina. + +2localhost.org.apache.juli.FileHandler.level = FINE +2localhost.org.apache.juli.FileHandler.directory = ${catalina.base}/logs +2localhost.org.apache.juli.FileHandler.prefix = localhost. + +java.util.logging.ConsoleHandler.level = FINE +java.util.logging.ConsoleHandler.formatter = java.util.logging.SimpleFormatter + +############################################################ +# Facility specific properties. +# Provides extra control for each logger. 
+############################################################ + +org.apache.catalina.core.ContainerBase.[Catalina].[localhost].level = INFO +org.apache.catalina.core.ContainerBase.[Catalina].[localhost].handlers = 2localhost.org.apache.juli.FileHandler + +# For example, set the com.xyz.foo logger to only log SEVERE +# messages: +#org.apache.catalina.startup.ContextConfig.level = FINE +#org.apache.catalina.startup.HostConfig.level = FINE +#org.apache.catalina.session.ManagerBase.level = FINE +#org.apache.catalina.core.AprLifecycleListener.level=FINE diff --git a/tomcat/files/tomcat6-juli-adapters.jar b/tomcat/files/tomcat6-juli-adapters.jar new file mode 100644 index 00000000..8b75f237 Binary files /dev/null and b/tomcat/files/tomcat6-juli-adapters.jar differ diff --git a/tomcat/files/tomcat6-juli-log4j.jar b/tomcat/files/tomcat6-juli-log4j.jar new file mode 100644 index 00000000..a8c8b4fa Binary files /dev/null and b/tomcat/files/tomcat6-juli-log4j.jar differ diff --git a/tomcat/files/tomcat7-juli-adapters.jar b/tomcat/files/tomcat7-juli-adapters.jar new file mode 100644 index 00000000..1eedbfc5 Binary files /dev/null and b/tomcat/files/tomcat7-juli-adapters.jar differ diff --git a/tomcat/files/tomcat7-juli-log4j.jar b/tomcat/files/tomcat7-juli-log4j.jar new file mode 100644 index 00000000..b09bf783 Binary files /dev/null and b/tomcat/files/tomcat7-juli-log4j.jar differ diff --git a/tomcat/handlers/main.yml b/tomcat/handlers/main.yml new file mode 100644 index 00000000..058d7a4e --- /dev/null +++ b/tomcat/handlers/main.yml @@ -0,0 +1,12 @@ +--- +- name: tomcat restart + service: name=tomcat'{{ tomcat_version }}' state=restarted sleep=20 + when: tomcat_service_enabled + +- name: enable tomcat + service: name=tomcat'{{ tomcat_version }}' state=started enabled=yes + when: tomcat_service_enabled + +- name: disable tomcat + service: name=tomcat'{{ tomcat_version }}' state=started enabled=no + when: not tomcat_service_enabled diff --git a/tomcat/tasks/access_log.yml 
b/tomcat/tasks/access_log.yml new file mode 100644 index 00000000..f827be01 --- /dev/null +++ b/tomcat/tasks/access_log.yml @@ -0,0 +1,7 @@ +--- +- name: Install a logrotate entry for the access log file + template: src=tomcat_access.logrotate.j2 dest=/etc/logrotate.d/tomcat_access owner=root group=root mode=0644 + when: tomcat_access_log_enabled + tags: + - tomcat + diff --git a/tomcat/tasks/main.yml b/tomcat/tasks/main.yml new file mode 100644 index 00000000..11ba07e5 --- /dev/null +++ b/tomcat/tasks/main.yml @@ -0,0 +1,20 @@ +--- +- include: tomcat-pkgs.yml +- include: tomcat-admin.yml + when: tomcat_install_admin +- include: tomcat-jmx.yml + when: + - tomcat_jmx_enabled + - tomcat_jmx_auth_enabled +- include: tomcat-log4j-logging.yml + when: tomcat_use_log4j +- include: tomcat-logger-logging.yml + when: not tomcat_use_log4j +- include: access_log.yml + when: tomcat_access_log_enabled +- include: pgsql_jdbc.yml + when: tomcat_install_pg_jdbc +- include: not_pgsql_jdbc.yml + when: not tomcat_install_pg_jdbc + + diff --git a/tomcat/tasks/not_pgsql_jdbc.yml b/tomcat/tasks/not_pgsql_jdbc.yml new file mode 100644 index 00000000..0a5882d3 --- /dev/null +++ b/tomcat/tasks/not_pgsql_jdbc.yml @@ -0,0 +1,11 @@ +--- +- name: Do not load the postgresql jdbc driver on tomcat if not needed + file: dest={{ tomcat_catalina_home_dir }}/lib/{{ item }} state=absent + with_items: + - postgresql-jdbc4.jar + when: not tomcat_install_pg_jdbc + notify: + tomcat restart + tags: + - tomcat + diff --git a/tomcat/tasks/pgsql_jdbc.yml b/tomcat/tasks/pgsql_jdbc.yml new file mode 100644 index 00000000..9872fe1a --- /dev/null +++ b/tomcat/tasks/pgsql_jdbc.yml @@ -0,0 +1,22 @@ +--- +# Postgresql JDBC +- name: Install the jdbc package if needed + apt: pkg={{ item }} state=installed + with_items: + - libpostgresql-jdbc-java + when: tomcat_install_pg_jdbc + tags: + - tomcat + - tomcat_jdbc + +- name: Configure tomcat to use the global postgresql jdbc driver + file: src=/usr/share/java/{{ item }} 
dest=/usr/share/tomcat{{ tomcat_version }}/lib/{{ item }} state=link + with_items: + - postgresql-jdbc4.jar + when: tomcat_install_pg_jdbc + notify: + tomcat restart + tags: + - tomcat + - tomcat_jdbc + diff --git a/tomcat/tasks/tomcat-admin.yml b/tomcat/tasks/tomcat-admin.yml new file mode 100644 index 00000000..836f1d6d --- /dev/null +++ b/tomcat/tasks/tomcat-admin.yml @@ -0,0 +1,15 @@ +--- +- name: Install the tomcat console management package + apt: pkg={{ item }} state={{ tomcat_pkg_state }} + with_items: + - tomcat'{{ tomcat_version }}'-admin + tags: + - tomcat + +- name: Install the tomcat users file + template: src=tomcat-users.xml.j2 dest={{ tomcat_conf_dir }}/tomcat-users.xml owner=root group={{ tomcat_user }} mode=0640 + notify: + tomcat restart + tags: + - tomcat + diff --git a/tomcat/tasks/tomcat-jmx.yml b/tomcat/tasks/tomcat-jmx.yml new file mode 100644 index 00000000..4f0fbff6 --- /dev/null +++ b/tomcat/tasks/tomcat-jmx.yml @@ -0,0 +1,22 @@ +--- +- name: Distribute the jmx authorization file + template: src=jmxremote.passwd.j2 dest={{ tomcat_jmx_auth_dir }}/jmxremote.passwd owner={{ tomcat_user }} mode=0600 + when: + - tomcat_jmx_enabled + - tomcat_jmx_auth_enabled + notify: + - tomcat restart + tags: + - tomcat + - jmx + +- name: Distribute the jmx role file + copy: src=jmxremote.access dest={{ tomcat_jmx_auth_dir }}/jmxremote.access owner=root mode=0644 + when: + - tomcat_jmx_enabled + - tomcat_jmx_auth_enabled + notify: + - tomcat restart + tags: + - tomcat + - jmx diff --git a/tomcat/tasks/tomcat-log4j-logging.yml b/tomcat/tasks/tomcat-log4j-logging.yml new file mode 100644 index 00000000..403161d9 --- /dev/null +++ b/tomcat/tasks/tomcat-log4j-logging.yml @@ -0,0 +1,53 @@ +--- +# Manage tomcat internal logs with log4j +- name: Install log4j + apt: pkg=liblog4j1.2-java state={{ tomcat_pkg_state }} + notify: + tomcat restart + tags: + - tomcat + +- name: Install tomcat-juli-adapters + copy: src=tomcat{{ tomcat_version }}-juli-adapters.jar 
dest=/usr/share/java/tomcat-juli-adapters.jar + tags: + - tomcat + +- name: Install tomcat-juli + copy: src=tomcat{{ tomcat_version }}-juli-log4j.jar dest=/usr/share/java/tomcat-juli-log4j.jar + tags: + - tomcat + +- name: Configure tomcat to use the log4j system library + file: src=/usr/share/java/{{ item }} dest={{ tomcat_catalina_home_dir }}/lib/{{ item }} state=link + with_items: + - log4j-1.2.jar + - tomcat-juli-adapters.jar + notify: + tomcat restart + tags: + - tomcat + +- name: Configure tomcat to use the log4j version of the juli library + file: src=/usr/share/java/{{ item }} dest={{ tomcat_catalina_home_dir }}/bin/tomcat-juli.jar state=link + with_items: + - tomcat-juli-log4j.jar + notify: + tomcat restart + tags: + - tomcat + +- name: Install log4j.properties + template: src=log4j.properties.j2 dest={{ tomcat_catalina_home_dir }}/lib/log4j.properties mode=0644 owner=root group=root + when: tomcat_install_the_log4j_properties + notify: + tomcat restart + tags: + - tomcat + +- name: Remove logging.properties + file: dest=/etc/tomcat{{ tomcat_version }}/logging.properties state=absent + notify: + tomcat restart + tags: + - tomcat + diff --git a/tomcat/tasks/tomcat-logger-logging.yml b/tomcat/tasks/tomcat-logger-logging.yml new file mode 100644 index 00000000..fe379adc --- /dev/null +++ b/tomcat/tasks/tomcat-logger-logging.yml @@ -0,0 +1,34 @@ +--- +- name: Remove the system log4j library from the tomcat libdir + file: dest={{ tomcat_catalina_home_dir }}/lib/{{ item }} state=absent + with_items: + - log4j-1.2.jar + - tomcat-juli-adapters.jar + notify: + tomcat restart + tags: + - tomcat + +- name: Configure tomcat to use the standard version of the juli library + file: src=/usr/share/java/{{ item }} dest={{ tomcat_catalina_home_dir }}/bin/{{ item }} state=link + with_items: + - tomcat-juli.jar + notify: + tomcat restart + tags: + - tomcat + +- name: Remove the system log4j.properties + file: dest={{ tomcat_catalina_home_dir }}/lib/log4j.properties 
state=absent + notify: + tomcat restart + tags: + - tomcat + +- name: Install logging.properties + copy: src=logging.properties dest=/etc/tomcat{{ tomcat_version }}/logging.properties owner=root group=root mode=0644 + notify: + tomcat restart + tags: + - tomcat + diff --git a/tomcat/tasks/tomcat-pkgs.yml b/tomcat/tasks/tomcat-pkgs.yml new file mode 100644 index 00000000..d40a52e8 --- /dev/null +++ b/tomcat/tasks/tomcat-pkgs.yml @@ -0,0 +1,44 @@ +--- +- name: Install the tomcat packages + apt: pkg={{ item }} state={{ tomcat_pkg_state }} + with_items: + - tomcat'{{ tomcat_version }}' + - libtomcat'{{ tomcat_version }}'-java + - tomcat'{{ tomcat_version }}'-common + - libapr1 + tags: + - tomcat + +- name: Create the tomcat tmp directory + file: dest={{ tomcat_tmp_dir }} state=directory owner={{ tomcat_user }} group={{ tomcat_user }} + notify: + tomcat restart + tags: + - tomcat + +- name: Configure tomcat defaults + template: src=tomcat-default.j2 dest=/etc/default/tomcat{{ tomcat_version }} + when: + - tomcat_install_default_conf is defined and tomcat_install_default_conf + notify: + tomcat restart + tags: + - tomcat + +- name: Configure tomcat server.xml + template: src=tomcat-server.xml.j2 dest={{ tomcat_conf_dir }}/server.xml + when: + - tomcat_install_default_conf is defined and tomcat_install_default_conf + notify: + tomcat restart + tags: + - tomcat + +- name: Create some directories that the package do not creates itself + file: dest={{ tomcat_catalina_home_dir }}/{{ item }} state=directory owner={{ tomcat_user }} group={{ tomcat_user }} mode=0755 + with_items: + - common/classes + - server/classes + - shared/classes + tags: + - tomcat diff --git a/tomcat/templates/jmxremote.passwd.j2 b/tomcat/templates/jmxremote.passwd.j2 new file mode 100644 index 00000000..cfca21cc --- /dev/null +++ b/tomcat/templates/jmxremote.passwd.j2 @@ -0,0 +1,2 @@ +monitorRole {{ tomcat_jmx_monitorpass }} +controlRole {{ tomcat_jmx_controlpass }} diff --git 
a/tomcat/templates/log4j.properties.j2 b/tomcat/templates/log4j.properties.j2 new file mode 100644 index 00000000..12dfa954 --- /dev/null +++ b/tomcat/templates/log4j.properties.j2 @@ -0,0 +1,68 @@ +{% if tomcat_send_to_logstash %} +log4j.rootLogger = {{ tomcat_log_level }}, {{ tomcat_logstash_logger }} +{% else %} +log4j.rootLogger = {{ tomcat_log_level }}, {{ tomcat_log_logger }} +{% endif %} + +# Define all the appenders +log4j.appender.CATALINA = org.apache.log4j.RollingFileAppender +log4j.appender.CATALINA.File = ${catalina.base}/logs/catalina.log +log4j.appender.CATALINA.Append = true +log4j.appender.CATALINA.Encoding = UTF-8 +log4j.appender.CATALINA.Threshold = {{ tomcat_log_rotation_threshold }} +log4j.appender.CATALINA.MaxFileSize = {{ tomcat_log_max_file_size }} +log4j.appender.CATALINA.MaxBackupIndex = {{ tomcat_retain_old_logs }} +log4j.appender.CATALINA.layout = org.apache.log4j.PatternLayout +log4j.appender.CATALINA.layout.ConversionPattern = %d [%t] %-5p %c- %m%n + +log4j.appender.LOCALHOST = org.apache.log4j.RollingFileAppender +log4j.appender.LOCALHOST.File = ${catalina.base}/logs/localhost.log +log4j.appender.LOCALHOST.Append = true +log4j.appender.LOCALHOST.Encoding = UTF-8 +log4j.appender.LOCALHOST.Threshold = {{ tomcat_log_rotation_threshold }} +log4j.appender.LOCALHOST.MaxFileSize = {{ tomcat_log_max_file_size }} +log4j.appender.LOCALHOST.MaxBackupIndex = {{ tomcat_retain_old_logs }} +log4j.appender.LOCALHOST.layout = org.apache.log4j.PatternLayout +log4j.appender.LOCALHOST.layout.ConversionPattern = %d [%t] %-5p %c- %m%n + +log4j.appender.MANAGER = org.apache.log4j.RollingFileAppender +log4j.appender.MANAGER.File = ${catalina.base}/logs/manager.log +log4j.appender.MANAGER.Append = true +log4j.appender.MANAGER.Encoding = UTF-8 +log4j.appender.MANAGER.Threshold = {{ tomcat_log_rotation_threshold }} +log4j.appender.MANAGER.MaxFileSize = {{ tomcat_log_max_file_size }} +log4j.appender.MANAGER.MaxBackupIndex = {{ tomcat_retain_old_logs }} 
+log4j.appender.MANAGER.layout = org.apache.log4j.PatternLayout +log4j.appender.MANAGER.layout.ConversionPattern = %d [%t] %-5p %c- %m%n + +log4j.appender.HOST-MANAGER = org.apache.log4j.RollingFileAppender +log4j.appender.HOST-MANAGER.File = ${catalina.base}/logs/host-manager.log +log4j.appender.HOST-MANAGER.Append = true +log4j.appender.HOST-MANAGER.Encoding = UTF-8 +log4j.appender.HOST-MANAGER.Threshold = {{ tomcat_log_rotation_threshold }} +log4j.appender.HOST-MANAGER.MaxFileSize = {{ tomcat_log_max_file_size }} +log4j.appender.HOST-MANAGER.MaxBackupIndex = {{ tomcat_retain_old_logs }} +log4j.appender.HOST-MANAGER.layout = org.apache.log4j.PatternLayout +log4j.appender.HOST-MANAGER.layout.ConversionPattern = %d [%t] %-5p %c- %m%n + +{% if tomcat_send_to_logstash %} +log4j.appender.LOGSTASH=org.apache.log4j.net.SocketAppender +log4j.appender.LOGSTASH.remoteHost={{ tomcat_logstash_collector_host }} +log4j.appender.LOGSTASH.port={{ tomcat_logstash_collector_socketappender_port }} +log4j.appender.LOGSTASH.ReconnectionDelay={{ tomcat_logstash_collector_socketappender_reconndelay }} +log4j.appender.LOGSTASH.LocationInfo=true +log4j.appender.LOGSTASH.layout = org.apache.log4j.PatternLayout +log4j.appender.LOGSTASH.layout.ConversionPattern = %d [%t] %-5p %c- %m%n +{% endif %} + +log4j.appender.CONSOLE = org.apache.log4j.ConsoleAppender +log4j.appender.CONSOLE.Encoding = UTF-8 +log4j.appender.CONSOLE.layout = org.apache.log4j.PatternLayout +log4j.appender.CONSOLE.layout.ConversionPattern = %d [%t] %-5p %c- %m%n + +# Configure which loggers log to which appenders +log4j.logger.org.apache.catalina.core.ContainerBase.[Catalina].[localhost] = {{ tomcat_log_level }}, LOCALHOST +log4j.logger.org.apache.catalina.core.ContainerBase.[Catalina].[localhost].[/manager] =\ + {{ tomcat_log_level }}, MANAGER +log4j.logger.org.apache.catalina.core.ContainerBase.[Catalina].[localhost].[/host-manager] =\ + {{ tomcat_log_level }}, HOST-MANAGER diff --git 
a/tomcat/templates/tomcat-default.j2 b/tomcat/templates/tomcat-default.j2 new file mode 100644 index 00000000..127207d7 --- /dev/null +++ b/tomcat/templates/tomcat-default.j2 @@ -0,0 +1,47 @@ +# Run Tomcat as this user ID. Not setting this or leaving it blank will use the +# default of tomcat{{ tomcat_version}}. +TOMCAT{{ tomcat_version}}_USER={{ tomcat_user }} + +# Run Tomcat as this group ID. Not setting this or leaving it blank will use +# the default of tomcat{{ tomcat_version}}. +TOMCAT{{ tomcat_version}}_GROUP={{ tomcat_user }} + +# The home directory of the Java development kit (JDK). You need at least +# JDK version 1.5. If JAVA_HOME is not set, some common directories for +# OpenJDK, the Sun JDK, and various J2SE 1.5 versions are tried. +{% if jdk_java_home is defined %} +JAVA_HOME={{ jdk_java_home }} +{% endif %} + +JAVA_OPTS="-server -Djava.awt.headless=true" +{% if tomcat_java_opts is defined %} +JAVA_OPTS="{{ tomcat_java_opts }}" +{% endif %} +{% if tomcat_java_gc_opts is defined %} +JAVA_OPTS="${JAVA_OPTS} {{ tomcat_java_gc_opts }}" +{% endif %} +{% if tomcat_other_java_opts is defined %} +JAVA_OPTS="${JAVA_OPTS} {{ tomcat_other_java_opts }}" +{% endif %} +{% if tomcat_jmx_enabled %} +JAVA_OPTS="${JAVA_OPTS} -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.port={{ tomcat_jmx_port }} -Dcom.sun.management.jmxremote.ssl=false" +{% if tomcat_jmx_auth_enabled %} +JAVA_OPTS="${JAVA_OPTS} -Dcom.sun.management.jmxremote.password.file={{ tomcat_jmx_auth_dir }}/jmxremote.password -Dcom.sun.management.jmxremote.access.file={{ tomcat_jmx_auth_dir }}/jmxremote.access" +{% else %} +JAVA_OPTS="${JAVA_OPTS} -Dcom.sun.management.jmxremote.authenticate=false" +{% endif %} +{% endif %} +{% if tomcat_enable_remote_debugging %} +# To enable remote debugging uncomment the following line. +# You will then be able to use a java debugger on port {{ tomcat_remote_debugging_port }}. 
+JAVA_OPTS="${JAVA_OPTS} -Xdebug -Xrunjdwp:transport=dt_socket,address={{ tomcat_remote_debugging_port }},server=y,suspend=n" +{% endif %} +# Location of the JVM temporary directory +# WARNING: This directory will be destroyed and recreated at every startup ! +JVM_TMP={{ tomcat_tmp_dir }} + +{% if tomcat_load_additional_default_conf %} +if [ -f /etc/default/tomcat.local ] ; then + . /etc/default/tomcat.local +fi +{% endif %} diff --git a/tomcat/templates/tomcat-server.xml.j2 b/tomcat/templates/tomcat-server.xml.j2 new file mode 100644 index 00000000..3c6a41df --- /dev/null +++ b/tomcat/templates/tomcat-server.xml.j2 @@ -0,0 +1,149 @@ + + + + + + + + + + + + + + + + + + + + + + + +{% if tomcat_http_enabled %} + + +{% endif %} + + +{% if tomcat_http_enabled %} + + + + + +{% endif %} +{% if tomcat_ajp_enabled %} + + +{% endif %} + + + + + + + + + + + + + + + + + + + + +{% if tomcat_access_log_enabled %} + + +{% endif %} + + + + diff --git a/tomcat/templates/tomcat-users.xml.j2 b/tomcat/templates/tomcat-users.xml.j2 new file mode 100644 index 00000000..f2563e4f --- /dev/null +++ b/tomcat/templates/tomcat-users.xml.j2 @@ -0,0 +1,40 @@ + + + + +{% if tomcat_manager_gui_user_enabled %} + + +{% endif %} +{% if tomcat_manager_script_user_enabled %} + + +{% endif %} +{% if tomcat_manager_jmx_user_enabled %} + + +{% endif %} +{% if tomcat_manager_status_user_enabled %} + + +{% endif %} + diff --git a/tomcat/templates/tomcat_access.logrotate.j2 b/tomcat/templates/tomcat_access.logrotate.j2 new file mode 100644 index 00000000..a4082fd9 --- /dev/null +++ b/tomcat/templates/tomcat_access.logrotate.j2 @@ -0,0 +1,8 @@ +{{ tomcat_logdir }}/localhost_access.log { + copytruncate + {{ tomcat_access_log_rotation_freq }} + rotate {{ tomcat_retain_old_logs }} + compress + missingok + create 640 {{ tomcat_user }} adm +} diff --git a/ubuntu-deb-general/defaults/main.yml b/ubuntu-deb-general/defaults/main.yml new file mode 100644 index 00000000..4d51e02f --- /dev/null +++ 
b/ubuntu-deb-general/defaults/main.yml @@ -0,0 +1,92 @@ +--- +cm_pubkey: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDJN8XR/N4p6FfymWJy7mwR3vbUboC4P+7CgZalflhK5iH0P7c24/zZDY9Y5QIq58IViY7napqZuRkNHnHcvm9mxtSxQ16qe03NulABN5V/ljgR0sQAWz8pwv68LDpR9uBSCbXDdDCUUlS+zOxCHA6s7O7PSFavX4An1Vd/mjwoeR4eLRQXNcKsK2Pu/BZ3TCLmWyi2otnxFiJ8IoKW1CvjxKWmt5BvAvys0dfsdnTSVz9yiUMwN5Oj8cw/jhKqadnkvqTGfGl1ELm9L2V7hT6LM0cIom9oRsQf+JJ6loBe3UUZGaAhY2jmARmZdX3qV9Wh+UtxaWMEAXB9mf/2cK9f jenkins@cm +andrea_dellamico: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDZ9n6B+J5S7NPnwjejPC2WrvcRzC07WPnAoQ7ZHZ0Mv9JakyWItswzI3Drz/zI0mCamyuye+9dWz9v/ZRwUfBobVyXuptRaZIwxlMC/KsTZofpp3RHOBTteZ4/VM0VhEeiOHu+GuzNE0fRB2gsusWeMMae2cq4TjVAOMcQmJX496L703Smc14gFrP8y/P9jbC5HquuVnPR29PsW4mHidPmjdKkO7QmDfFAj44pEUGeInYOJe708C03NCpsjHw8AVdAJ6Pf16EOdDH+z8D6CByVO3s8UT0HJ85BRoIy6254/hmYLzyd/eRnCXHS/dke+ivrlA3XxG4+DmqjuJR/Jpfx adellam@semovente +tommaso_piccioli: ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAzcHuDU7PgJwz34AsVG0E2+ZRx17ZKW1uDEGABNk3Z60/c9LTwWKPj6kcIRy6RzFJI5X+IgPJnYouXVmJsIWjVL8IRk8fP1ffJC6Fyf6H7+fCxu/Wwed5OoOCvKeZ0bEmJ1tlXFM6+EnxKqLCvz3fsNy8e4WKMnpS1hT8K6YB7PMjt60S3wOaxds1Lv4NmmgnfGM5uZFYrZCx1/GJCzNSh7AEEEUIVQ1B8xmXbet7whNiwDmiOnXSlt38dkIYT8kNMuRCj/r9wPr7FmoUCOFzUVXTcnuYagKyURrZ8QDyHbK6XQLYXgvCz/lWoErGFbDqpmBHHyvKSeLPxYfJpWJ70w== tom@tom +backup_agent: ssh-dss AAAAB3NzaC1kc3MAAACBANBn5i7oJd12+GAeDVSAiPqCxcCDzWe41g3Vy/LhbYKwG0smPNJRfvyf7lKWkgolJfMJZrk7bBVhJoApkV7vkFkrSPueyRC+/ohjafpOsmxRYiOaSrDZ2c9TbGFVZTh23pUXoDPp2Z0N8l471b9Mx/nqgtflCV+IVICcDZbUhcCTAAAAFQC+fmfljTFllCMKsgrSJcQAtiIT/QAAAIEAvrsLfmQzHQjt4G5FhcPVbvP87KUsDh0xksCfMRP6bQBz/3mcnt7V5/MLll/CZMiOWjRK3ww9zCYHprUwQtAZSllFWiGUKw1tDvf1ZQGESYP/vvWwcpPZpVsRHlhRtuMsQchSRxw03yYOqEEa2akWzQlvaZ4CWWym931mZg6zY4AAAACAG/l8dU/QEMK1JP3rDV0kZYvcxjUC9Mxw5ScTyVqVnxDL75ssX9HiQamsiTk0dYNyl8qkB38FfkB4LhEb8FkHs4toN+nTNPPlLqhpYMs+anwyNy32LnXAVP02VJ2+3exwGe0b5vtIFpj+j8s7YZMHN5x6d4xhZ9oq5M2pJN6M48E= root@dlibbackup +monja_dariva: ssh-rsa 
AAAAB3NzaC1yc2EAAAABIwAAAQEAuQJvgDc8lQB+EArajGPEirRuYxGcInfiM3uRS0P5Dhqch6cuNdMFFjCoQVFL2Dvs7QNSRm8mvnPLWOCYLEFPBdXlA63w+n3VWoVOs0lUgQM77/axetd/K8BCkJlcA/exvVxLtzc5k8hN1k3OJY/Npi2Xa4WyEMV6t7+vYK3MXPjFBy4Y/aLWZvHcCn0zUbeB8T8PJ2S8taCIOMzemUzjGs3c0f4y6oaJx1gPw31PCahkaVS4ZLSt+0y3DRaGiXjyzgbQPf1whBOT4SSiX3SgdMvxA/Fzz2sSAn9PNfKq+/vygn7qDB79qzBhOXs36dPuwmsqggxIZasGUT/YfRp5Cw== monja@pc-monja + +old_marko_mikulicic: ssh-dss AAAAB3NzaC1kc3MAAACBAO/KjuevegLjP3SXeZAdmHySuOjlNWllsuurdzes9HwF7HBEtFAuSE7vBeNcpfsdUytq92JUBAwNk9VwxNnnyVgeznFQ7ocGBh0Yfu4j9EXiWVA7vO8xZ9kqjl+HwUELrR1a8d4mngXgNQ1OAm+i3vvpBA6b4CV2L2hrEsPL5LPVAAAAFQD0VroYiG13uOsHCJaVyWH6V7w4twAAAIA4moWcTj36r+FpJYHH3c+QGC8XgPi6mwsqJexJ3sZRfEDAuDTgB5UyLJStY5EE2pChVpACx8KDlONcyuCdA8HIDC+RAJ03tY//UR2Ndg1y0yH8BnpjFM9Ow5JcoWzz9clC4GD0zGA90aiQd37I3JfPoTTEjLvJegg/C8GtlLtB+AAAAIEAgHwTzFLfZ0Q5tDK/kxeKa/x52O4ZfOXBTOYQZy5A6+ohoOOIKuEYmUOxh9ovE38St2+Q+1CgGnhBA79Y2pBdzpvY6VwKdcQBtyZSsJ7ghMTpksdNwZkZ3rIDgMi0yeBUl9qe339dXzV77uM/Q8Tx0UhSHTEIpyu1WZ8d/AAqrCQ= marko + +root_ssh_keys: + - '{{ cm_pubkey }}' + - '{{ andrea_dellamico }}' + - '{{ tommaso_piccioli }}' + - '{{ backup_agent }}' + - '{{ monja_dariva }}' + +obsolete_root_ssh_keys: + - '{{ old_marko_mikulicic }}' + +# +# Use the apt proxy +# +use_apt_proxy: False +apt_proxy_url: "http://apt.research-infrastructures.eu:9999" + +common_packages: + - zile + - dstat + - iotop + - wget + - vim-tiny + - psmisc + - tcpdump + - lsof + - strace + - rsync + - multitail + - unzip + - htop + - tree + - bind9-host + - bash-completion + +# Unattended upgrades +unatt_allowed_origins: + - '${distro_id}:${distro_codename}-security' +#unatt_blacklisted: +# - libc6 +unatt_autofix: "true" +# When true, the procedure is really slow +unatt_minimalsteps: "false" +unatt_install_on_shutdown: "false" +#unatt_email: sysadmin@isti.cnr.it +unatt_email_on_error: "false" +unatt_autoremove: "true" +unatt_autoreboot: "false" +unatt_autoreboot_time: "now" + +# +# Defaults +# +cleanup_base_packages: True 
+base_packages_to_remove: + - ppp + - at + +cleanup_x_base_packages: False +x_base_packages_to_remove: + - firefox-locale-en + - x11-common + +cleanup_nfs_packages: False +nfs_packages: + - nfs-common + - portmap + +cleanup_rpcbind_packages: False +rpcbind_packages: + - rpcbind + +disable_ipv6: True +ipv6_sysctl_value: 1 +ipv6_sysctl_file: /etc/sysctl.d/10-ipv6-disable.conf + +# Install our /etc/resolv.conf +install_resolvconf: True + +# Install and configure munin +configure_munin: True + +# Manage the root ssh keys +manage_root_ssh_keys: True diff --git a/ubuntu-deb-general/files/02proxy b/ubuntu-deb-general/files/02proxy new file mode 100644 index 00000000..c3949dc0 --- /dev/null +++ b/ubuntu-deb-general/files/02proxy @@ -0,0 +1 @@ +Acquire::http { Proxy "http://apt.research-infrastructures.eu:9999"; }; diff --git a/ubuntu-deb-general/files/10-ipv6-disable.conf.modprobe b/ubuntu-deb-general/files/10-ipv6-disable.conf.modprobe new file mode 100644 index 00000000..d7a58eaf --- /dev/null +++ b/ubuntu-deb-general/files/10-ipv6-disable.conf.modprobe @@ -0,0 +1,3 @@ +# Disable IPv6 +alias net-pf-10 off +alias ipv6 off diff --git a/ubuntu-deb-general/files/10-ipv6-disable.conf.sysctl b/ubuntu-deb-general/files/10-ipv6-disable.conf.sysctl new file mode 100644 index 00000000..5af98463 --- /dev/null +++ b/ubuntu-deb-general/files/10-ipv6-disable.conf.sysctl @@ -0,0 +1,4 @@ +# Disable IPv6 +net.ipv6.conf.all.disable_ipv6 = 1 +net.ipv6.conf.default.disable_ipv6 = 1 +net.ipv6.conf.lo.disable_ipv6 = 1 diff --git a/ubuntu-deb-general/files/resolv.conf b/ubuntu-deb-general/files/resolv.conf new file mode 100644 index 00000000..38718074 --- /dev/null +++ b/ubuntu-deb-general/files/resolv.conf @@ -0,0 +1,5 @@ +search research-infrastructures.eu isti.cnr.it +nameserver 146.48.122.10 +nameserver 146.48.80.4 +nameserver 146.48.80.3 +timeout: 1 diff --git a/ubuntu-deb-general/handlers/main.yml b/ubuntu-deb-general/handlers/main.yml new file mode 100644 index 00000000..cbb26546 --- 
/dev/null +++ b/ubuntu-deb-general/handlers/main.yml @@ -0,0 +1,20 @@ +--- +- name: Restart denyhosts + service: name=denyhosts state=restarted + +- name: reload munin-node + service: name=munin-node state=restarted + +- name: Enable libvirt munin plugins + command: munin-libvirt-plugins-detect ; /etc/init.d/munin-node restart + +# NB: to be tested. Better to transform it in a module +- name: Update the list of active munin plugins + command: munin-node-configure --suggest --shell | sh ; /etc/init.d/munin-node restart + +- name: apt update + apt: update_cache=yes + +- name: Restart rsyslog + service: name=rsyslog state=restarted + diff --git a/ubuntu-deb-general/meta/main.yml b/ubuntu-deb-general/meta/main.yml new file mode 100644 index 00000000..6af20c9e --- /dev/null +++ b/ubuntu-deb-general/meta/main.yml @@ -0,0 +1,6 @@ +--- +dependencies: + - role: '../../library/roles/deb-apt-setup' + - role: '../../library/roles/timezone' + - role: '../../library/roles/deb-set-locale' + - role: '../../library/roles/fail2ban' diff --git a/ubuntu-deb-general/tasks/apt-proxy.yml b/ubuntu-deb-general/tasks/apt-proxy.yml new file mode 100644 index 00000000..50c987ac --- /dev/null +++ b/ubuntu-deb-general/tasks/apt-proxy.yml @@ -0,0 +1,6 @@ +--- +- name: setup apt cache + template: src=02proxy.j2 dest=/etc/apt/apt.conf.d/02proxy + when: has_apt + tags: + - aptproxy diff --git a/ubuntu-deb-general/tasks/apt-setup.yml b/ubuntu-deb-general/tasks/apt-setup.yml new file mode 100644 index 00000000..898ac983 --- /dev/null +++ b/ubuntu-deb-general/tasks/apt-setup.yml @@ -0,0 +1,20 @@ +--- +# First things first: install python-apt with a raw command +- name: install python-apt + raw: "apt-get update; apt-get install -y python-apt lsb-release" + when: has_apt + tags: + - pythonapt + +- name: Install python-software-properties + apt: pkg=python-software-properties state=installed + when: has_apt + tags: + - pythonapt + +- name: Install software-properties-common on quantal distributions + 
apt: pkg=software-properties-common state=installed + when: is_quantal + tags: + - pythonapt + diff --git a/ubuntu-deb-general/tasks/denyhost.yml b/ubuntu-deb-general/tasks/denyhost.yml new file mode 100644 index 00000000..f8be3b08 --- /dev/null +++ b/ubuntu-deb-general/tasks/denyhost.yml @@ -0,0 +1,34 @@ +--- +- name: install denyhosts on debian <=7 and ubuntu <= 12.04 + apt: pkg={{ item }} state=installed + when: is_ubuntu_less_than_trusty + with_items: + - denyhosts + when: is_not_trusty + tags: + - denyhosts + +- name: ensure CM can access the VMs + action: | + lineinfile name=/etc/hosts.allow regexp="sshd: 146.48.123.18$" line="sshd: 146.48.123.18" + when: is_not_trusty + tags: + - denyhosts + +- name: ensure Monitoring can connect via ssh + action: | + lineinfile name=/etc/hosts.allow regexp="sshd: 146.48.123.23$" line="sshd: 146.48.123.23" + when: is_not_trusty + tags: + - denyhosts + - monitoring + +- name: Set the treshold for root on the denyhosts config file + lineinfile: | + name=/etc/denyhosts.conf regexp="^DENY_THRESHOLD_ROOT = " line="DENY_THRESHOLD_ROOT = 5" + when: is_not_trusty + tags: + - denyhosts + notify: + - Restart denyhosts + diff --git a/ubuntu-deb-general/tasks/disable-ipv6-old-servers.yml b/ubuntu-deb-general/tasks/disable-ipv6-old-servers.yml new file mode 100644 index 00000000..18a6dcc2 --- /dev/null +++ b/ubuntu-deb-general/tasks/disable-ipv6-old-servers.yml @@ -0,0 +1,31 @@ +--- +- file: dest=/etc/modprobe.d/00-ipv6-disable.conf state=absent + when: is_debian_less_than6 + tags: + - ipv6 + +- file: dest=/etc/modutils/disable-ipv6 state=absent + when: is_debian_less_than6 + tags: + - ipv6 + +- file: dest=/etc/sysctl.d/10-ipv6-disable.conf state=absent + when: is_debian_less_than6 + tags: + - ipv6 + +- lineinfile: name=/etc/modprobe.d/aliases regexp="^alias net-pf-10.*$" line="alias net-pf-10 off" + when: is_debian_less_than6 + tags: + - ipv6 + +- lineinfile: name=/etc/modprobe.d/aliases regexp="^alias ipv6.*$" line="alias ipv6 off" + 
when: is_debian_less_than6 + tags: + - ipv6 + +- action: down_ipv6_addresses action=remove + when: is_debian_less_than6 + tags: + - ipv6 + diff --git a/ubuntu-deb-general/tasks/install_external_ca_cert.yml b/ubuntu-deb-general/tasks/install_external_ca_cert.yml new file mode 100644 index 00000000..b74e0354 --- /dev/null +++ b/ubuntu-deb-general/tasks/install_external_ca_cert.yml @@ -0,0 +1,6 @@ +--- +- name: Install the INFN CA certificate + get_url: url=https://security.fi.infn.it/CA/mgt/INFNCA.pem dest=/etc/ssl/certs/infn-ca.pem + tags: + - ca + diff --git a/ubuntu-deb-general/tasks/main.yml b/ubuntu-deb-general/tasks/main.yml new file mode 100644 index 00000000..aa9e9a9e --- /dev/null +++ b/ubuntu-deb-general/tasks/main.yml @@ -0,0 +1,23 @@ +--- +- include: apt-proxy.yml +- include: resolvconf.yml + when: install_resolvconf +- include: packages.yml +- include: remove-unneeded-pkgs.yml +- include: manage-ipv6-status.yml + when: is_not_debian_less_than_6 +- include: disable-ipv6-old-servers.yml + when: disable_ipv6 +- include: denyhost.yml + when: is_not_trusty +- include: munin.yml + when: configure_munin +- include: pubkeys.yml + when: manage_root_ssh_keys +- include: rsyslogfix.yml + when: is_precise and ansible_kernel != "3.2.0-4-amd64" +- include: unattended-upgrades.yml +- include: install_external_ca_cert.yml +- include: set-hostname.yml + when: hostname is defined + diff --git a/ubuntu-deb-general/tasks/manage-ipv6-status.yml b/ubuntu-deb-general/tasks/manage-ipv6-status.yml new file mode 100644 index 00000000..5c0164f4 --- /dev/null +++ b/ubuntu-deb-general/tasks/manage-ipv6-status.yml @@ -0,0 +1,15 @@ +--- +- name: Ensure that the /etc/sysctl.d directory exists + file: path=/etc/sysctl.d state=directory + when: is_not_debian_less_than_6 + tags: ipv6 + +- name: Manage the in kernel ipv6 support + sysctl: name={{ item }} value={{ ipv6_sysctl_value }} sysctl_file={{ ipv6_sysctl_file }} reload=yes state=present + with_items: + - 
net.ipv6.conf.all.disable_ipv6 + - net.ipv6.conf.default.disable_ipv6 + - net.ipv6.conf.lo.disable_ipv6 + when: is_not_debian_less_than_6 + tags: ipv6 + diff --git a/ubuntu-deb-general/tasks/munin.yml b/ubuntu-deb-general/tasks/munin.yml new file mode 100644 index 00000000..b5ee4db4 --- /dev/null +++ b/ubuntu-deb-general/tasks/munin.yml @@ -0,0 +1,24 @@ +--- +- name: install munin-node + apt: pkg=munin-node state=installed + when: has_apt + tags: + - packages + - munin + +- name: munin-node ACLs + lineinfile: name=/etc/munin/munin-node.conf line={{ item }} + with_items: + - '"allow ^146\.48\.87\.88$"' + - '"allow ^146\.48\.122\.15$"' + - '"allow ^146\.48\.123\.23$"' + register: munin_config + tags: + - munin + +- name: reload munin-node + service: name=munin-node state=restarted + when: munin_config.changed == True + tags: + - munin + diff --git a/ubuntu-deb-general/tasks/packages.yml b/ubuntu-deb-general/tasks/packages.yml new file mode 100644 index 00000000..82956e1c --- /dev/null +++ b/ubuntu-deb-general/tasks/packages.yml @@ -0,0 +1,86 @@ +--- +- name: install iotop backport PPA + copy: src=files/hardy-iotop-ppa dest=/etc/apt/sources.list.d/iotop-ppa.list + when: is_hardy + notify: apt update + tags: + - packages + +- name: GPG key for iotop backport PPA + shell: apt-key list | grep -q 4B1E287796DD5C9A || gpg --keyserver keyserver.ubuntu.com --recv 4B1E287796DD5C9A; gpg --export --armor 4B1E287796DD5C9A | sudo apt-key add - + when: is_hardy + tags: + - packages + +- name: Install the basic packages + apt: pkg=python-software-properties state=installed + when: has_apt + tags: + - packages + +- name: Install software-properties-common if needed + apt: pkg=software-properties-common state=installed + when: is_quantal + tags: + - packages + +- name: Install the backports repository on debian 6 + apt_repository: repo='deb http://http.debian.net/debian-backports squeeze-backports main' state=present + register: update_apt_cache + when: is_debian6 + tags: + - 
squeeze-backports + +- name: Install the squeeze-lts repository on debian 6 + apt_repository: repo='deb http://http.debian.net/debian squeeze-lts main contrib non-free' state=present + register: update_apt_cache + when: is_debian6 + tags: + - squeeze-lts + +- name: Install the backports repository on debian 7 + apt_repository: repo='deb http://http.debian.net/debian wheezy-backports main' state=present + register: update_apt_cache + when: is_debian7 + tags: + - wheezy-backports + +- name: apt key for the internal ppa repository + apt_key: url=http://ppa.research-infrastructures.eu/system/keys/system-archive.asc state=present + when: is_ubuntu + tags: + - packages + +- name: setup system apt repository + apt_repository: repo='deb http://ppa.research-infrastructures.eu/system stable main' + register: update_apt_cache + when: is_ubuntu + tags: + - packages + +- name: Update the apt cache + apt: update_cache=yes + when: update_apt_cache.changed + ignore_errors: True + tags: + - packages + +- name: install common packages + apt: pkg={{ item }} state=installed + when: has_apt + with_items: common_packages + tags: + - packages + +- name: Install the ntp server + apt: pkg=ntp state=installed + tags: + - packages + - ntp + +- name: Ensure that the ntp server is running + service: name=ntp state=started + tags: + - packages + - ntp + diff --git a/ubuntu-deb-general/tasks/pubkeys.yml b/ubuntu-deb-general/tasks/pubkeys.yml new file mode 100644 index 00000000..cfdc6434 --- /dev/null +++ b/ubuntu-deb-general/tasks/pubkeys.yml @@ -0,0 +1,13 @@ +--- +# TODO: fetch the keys from ldap +- name: various pub ssh keys for users and apps + authorized_key: user=root key="{{ item }}" state=present + with_items: root_ssh_keys + tags: + - root_pubkeys + +- name: Remove obsolete keys from the authorized ones + authorized_key: user=root key="{{ item }}" state=absent + with_items: obsolete_root_ssh_keys + tags: + - root_pubkeys diff --git a/ubuntu-deb-general/tasks/remove-unneeded-pkgs.yml 
b/ubuntu-deb-general/tasks/remove-unneeded-pkgs.yml new file mode 100644 index 00000000..ccd0bc0d --- /dev/null +++ b/ubuntu-deb-general/tasks/remove-unneeded-pkgs.yml @@ -0,0 +1,37 @@ +--- +- name: Remove unneeded base packages + apt: pkg={{ item }} state=removed + with_items: cleanup_base_packages + when: cleanup_base_packages + tags: + - packages + - pkg_cleanup + +- name: Remove unneeded X packages + apt: pkg={{ item }} state=removed + with_items: x_base_packages_to_remove + when: cleanup_x_base_packages + tags: + - packages + - pkg_cleanup + +- name: Remove the nfs packages + apt: pkg={{ item }} state=removed + with_items: nfs_packages + when: + - is_not_precise + - cleanup_nfs_packages + tags: + - packages + - pkg_cleanup + +- name: Remove rpcbind packages + apt: pkg={{ item }} state=removed + with_items: rpcbind_packages + when: + - "({{ is_precise }}) or ({{ is_debian7 }})" + - cleanup_rpcbind_packages + tags: + - packages + - pkg_cleanup + diff --git a/ubuntu-deb-general/tasks/resolvconf.yml b/ubuntu-deb-general/tasks/resolvconf.yml new file mode 100644 index 00000000..fd31205d --- /dev/null +++ b/ubuntu-deb-general/tasks/resolvconf.yml @@ -0,0 +1,3 @@ +--- +- copy: src=resolv.conf dest=/etc/resolv.conf owner=root group=root mode=0644 + diff --git a/ubuntu-deb-general/tasks/rsyslogfix.yml b/ubuntu-deb-general/tasks/rsyslogfix.yml new file mode 100644 index 00000000..7202091d --- /dev/null +++ b/ubuntu-deb-general/tasks/rsyslogfix.yml @@ -0,0 +1,20 @@ +--- +- name: Fix rsyslog behaviour on some ubuntu machines disabling the kernel logger + lineinfile: dest=/etc/rsyslog.conf regexp="\\$ModLoad\ imklog" line="#$ModLoad imklog" backup=yes + when: + - is_precise and ansible_kernel != "3.2.0-4-amd64" + - is_not_trusty + notify: + Restart rsyslog + tags: + - rsyslog + +- name: Enable the kernel logger on ubuntu 12.04 and kernel major version >= 3 + lineinfile: dest=/etc/rsyslog.conf line="$ModLoad imklog" insertafter="^#$ModLoad imklog" backup=yes + when: + - 
is_precise and ansible_kernel == "3.2.0-4-amd64" + - is_trusty + notify: + Restart rsyslog + tags: + - rsyslog diff --git a/ubuntu-deb-general/tasks/set-hostname.yml b/ubuntu-deb-general/tasks/set-hostname.yml new file mode 100644 index 00000000..335b345a --- /dev/null +++ b/ubuntu-deb-general/tasks/set-hostname.yml @@ -0,0 +1,6 @@ +--- +- name: Set the hostname + hostname: name={{ hostname }} + when: hostname is defined + tags: bootstrap + diff --git a/ubuntu-deb-general/tasks/unattended-upgrades.yml b/ubuntu-deb-general/tasks/unattended-upgrades.yml new file mode 100644 index 00000000..df7cb4ec --- /dev/null +++ b/ubuntu-deb-general/tasks/unattended-upgrades.yml @@ -0,0 +1,14 @@ +--- +- name: Install the unattended-upgrades package. We use it to manage security fix. + apt: pkg=unattended-upgrades state=latest + tags: + - packages + - upgrades + - unattended + +- name: Install the unattended-upgrades configuration + template: src=50unattended-upgrades.j2 dest=/etc/apt/apt.conf.d/50unattended-upgrades owner=root group=root mode=0444 + tags: + - packages + - upgrades + - unattended diff --git a/ubuntu-deb-general/templates/02proxy.j2 b/ubuntu-deb-general/templates/02proxy.j2 new file mode 100644 index 00000000..ceb33faf --- /dev/null +++ b/ubuntu-deb-general/templates/02proxy.j2 @@ -0,0 +1,3 @@ +{% if use_apt_proxy %} +Acquire::http { Proxy "{{ apt_proxy_url }}"; }; +{% endif %} \ No newline at end of file diff --git a/ubuntu-deb-general/templates/50unattended-upgrades.j2 b/ubuntu-deb-general/templates/50unattended-upgrades.j2 new file mode 100644 index 00000000..abdc48ac --- /dev/null +++ b/ubuntu-deb-general/templates/50unattended-upgrades.j2 @@ -0,0 +1,60 @@ +// Automatically upgrade packages from these (origin:archive) pairs +Unattended-Upgrade::Allowed-Origins { +{%for allowed in unatt_allowed_origins %} + "{{ allowed }}"; +{%endfor %} +}; + +// List of packages to not update (regexp are supported) +Unattended-Upgrade::Package-Blacklist { +{% if 
unatt_blacklisted is defined %} +{%for pkg in unatt_blacklisted %} + "{{ pkg }}"; +{%endfor %} +{% endif %} +}; + +// This option allows you to control if on a unclean dpkg exit +// unattended-upgrades will automatically run +// dpkg --force-confold --configure -a +// The default is true, to ensure updates keep getting installed +Unattended-Upgrade::AutoFixInterruptedDpkg "{{ unatt_autofix }}"; + +// Split the upgrade into the smallest possible chunks so that +// they can be interrupted with SIGUSR1. This makes the upgrade +// a bit slower but it has the benefit that shutdown while a upgrade +// is running is possible (with a small delay) +Unattended-Upgrade::MinimalSteps "{{ unatt_minimalsteps }}"; + +// Install all unattended-upgrades when the machine is shuting down +// instead of doing it in the background while the machine is running +// This will (obviously) make shutdown slower +Unattended-Upgrade::InstallOnShutdown "{{ unatt_install_on_shutdown }}"; + +{% if unatt_email is defined %} +// Send email to this address for problems or packages upgrades +// If empty or unset then no email is sent, make sure that you +// have a working mail setup on your system. A package that provides +// 'mailx' must be installed. E.g. "user@example.com" +Unattended-Upgrade::Mail "{{ unatt_email }}"; +// Set this value to "true" to get emails only on errors. 
Default +// is to always send a mail if Unattended-Upgrade::Mail is set +Unattended-Upgrade::MailOnlyOnError "{{ unatt_email_on_error }}"; +{% endif %} + +// Do automatic removal of new unused dependencies after the upgrade +// (equivalent to apt-get autoremove) +Unattended-Upgrade::Remove-Unused-Dependencies "{{ unatt_autoremove }}"; + +// Automatically reboot *WITHOUT CONFIRMATION* +// if the file /var/run/reboot-required is found after the upgrade +Unattended-Upgrade::Automatic-Reboot "{{ unatt_autoreboot }}"; + +// If automatic reboot is enabled and needed, reboot at the specific +// time instead of immediately +// Default: "now" +Unattended-Upgrade::Automatic-Reboot-Time "{{ unatt_autoreboot_time }}"; + +// Use apt bandwidth limit feature, this example limits the download +// speed to 70kb/sec +//Acquire::http::Dl-Limit "70"; diff --git a/ubuntu-deb-general/templates/send_nsca.j2 b/ubuntu-deb-general/templates/send_nsca.j2 new file mode 100644 index 00000000..aa2308c6 --- /dev/null +++ b/ubuntu-deb-general/templates/send_nsca.j2 @@ -0,0 +1,2 @@ +password={{ nsca_password }} +decryption_method={{ nsca_encryption }} diff --git a/users/defaults/main.yml b/users/defaults/main.yml new file mode 100644 index 00000000..ced4cea7 --- /dev/null +++ b/users/defaults/main.yml @@ -0,0 +1,8 @@ +--- +users_sudoers_group: wheel +users_sudoers_create_group: False +users_sudoers_create_sudo_conf: False +users_home_dir: /home +#users_system_users: +# - { login: 'adellam', name: "Andrea Dell'Amico", home: '{{ users_home_dir }}, createhome: 'yes', ssh_key: '{{ adellam_ssh_key }}', shell: '/bin/bash', admin: 'True' } + diff --git a/users/tasks/main.yml b/users/tasks/main.yml new file mode 100644 index 00000000..33c9149d --- /dev/null +++ b/users/tasks/main.yml @@ -0,0 +1,37 @@ +--- +- name: Create the sudoers group if needed + group: name={{ users_sudoers_group }} state=present + when: users_sudoers_create_group + tags: + - users + +- name: Add a sudo additional configuration for 
the new sudoers group + template: src=sudoers.j2 dest=/etc/sudoers.d/{{ users_sudoers_group }} + when: users_sudoers_create_sudo_conf + tags: + - users + +- name: Create users + user: name={{ item.login }} comment="{{ item.name }}" home={{ item.home }}/{{ item.login }} createhome={{ item.createhome }} shell={{ item.shell }} + with_items: users_system_users + when: + - users_system_users is defined + tags: + - users + +- name: ensure that the users can login with their ssh keys + authorized_key: user="{{ item.login }}" key="{{ item.ssh_key }}" state=present + with_items: users_system_users + when: + - users_system_users is defined + - item.ssh_key is defined + tags: + - users + +- name: Add the admin users to the sudoers group + user: name={{ item.login }} groups={{ users_sudoers_group }} + with_items: users_system_users + when: + - users_system_users is defined + tags: + - users diff --git a/users/templates/sudoers.j2 b/users/templates/sudoers.j2 new file mode 100644 index 00000000..0bef21c0 --- /dev/null +++ b/users/templates/sudoers.j2 @@ -0,0 +1 @@ +%{{ users_sudoers_group }} ALL=(ALL) ALL diff --git a/varnish-cache/defaults/main.yml b/varnish-cache/defaults/main.yml new file mode 100644 index 00000000..331e3db1 --- /dev/null +++ b/varnish-cache/defaults/main.yml @@ -0,0 +1,28 @@ +--- +varnish_version: 4.0 +varnish_repo: True +varnish_repo_requirements: + - apt-transport-https +varnish_repo_url: "https://repo.varnish-cache.org/debian/ wheezy varnish-{{ varnish_version }}" +varnish_pkg_name: varnish +varnish_pkg_state: present +varnish_enabled: True + + +varnish_listen_port: 6810 +varnish_static_c_timeout: 240s +varnish_static_first_byte_timeout: 360s +varnish_static_between_bytes_timeout: 360s +# We are using 3000 in production +varnish_static_max_connections: 200 +varnish_storage_file: /var/lib/varnish/varnish_storage.bin +# We are using 12288M in production +varnish_storage_size: 1G +# Expressed in MBs. 
We do not use it right now +varnish_ram_cache_size: 512 +# We are using 48000 in production +varnish_ttl: 120 +varnish_user: varnish +varnish_group: varnish +varnish_purge_whitelist: + - 127.0.0.1 diff --git a/varnish-cache/files/varnish-sepol.te b/varnish-cache/files/varnish-sepol.te new file mode 100644 index 00000000..ac912d51 --- /dev/null +++ b/varnish-cache/files/varnish-sepol.te @@ -0,0 +1,11 @@ + +module varnish-sepol 1.0; + +require { + type varnishd_t; + class capability { fowner fsetid }; +} + +#============= varnishd_t ============== +allow varnishd_t self:capability fowner; +allow varnishd_t self:capability fsetid; diff --git a/varnish-cache/handlers/main.yml b/varnish-cache/handlers/main.yml new file mode 100644 index 00000000..9a6c15ce --- /dev/null +++ b/varnish-cache/handlers/main.yml @@ -0,0 +1,5 @@ +--- +- name: Reload varnish + service: name=varnish state=reloaded + + diff --git a/varnish-cache/tasks/main.yml b/varnish-cache/tasks/main.yml new file mode 100644 index 00000000..253bac6c --- /dev/null +++ b/varnish-cache/tasks/main.yml @@ -0,0 +1,49 @@ +--- +- name: Install the required packages needed by the external varnish repo + apt: pkg={{ item }} state=present + with_items: varnish_repo_requirements + when: varnish_repo + tags: varnish + +- name: Get the varnish repo key + apt_key: url=https://repo.varnish-cache.org/GPG-key.txt state=present + when: varnish_repo + register: varnish_repo_list + tags: varnish + +- name: Define the varnish repository + apt_repository: repo='deb {{ varnish_repo_url }}' state=present + when: varnish_repo + register: varnish_repo_list + tags: varnish + +- name: Update the apt cache if needed + apt: update_cache=yes + when: ( varnish_repo_list | changed ) + tags: varnish + +- name: Install the varnish package + apt: pkg={{ item }} state={{ varnish_pkg_state }} + with_items: varnish_pkg_name + tags: varnish + +# - name: Install the varnish parameters file. 
The config file needs to be set by a local task +# template: src={{ item }}.j2 dest=/etc/varnish/{{ item }} owner=root group=root mode=0444 +# with_items: +# - varnish.params +# notify: Reload varnish +# tags: +# - varnish +# - varnishconf + +# - name: Ensure that the varnish service is started and enabled +# service: name=varnish state=started enabled=yes +# when: varnish_enabled +# tags: +# - varnish + +# - name: Ensure that the varnish service is stopped and disabled +# service: name=varnish state=stopped enabled=no +# when: not varnish_enabled +# tags: +# - varnish diff --git a/varnish-cache/templates/default.vcl.j2 b/varnish-cache/templates/default.vcl.j2 new file mode 100644 index 00000000..3563bec4 --- /dev/null +++ b/varnish-cache/templates/default.vcl.j2 @@ -0,0 +1,162 @@ +# This is a basic VCL configuration file for varnish. See the vcl(7) +# man page for details on VCL syntax and semantics. +# +# Default backend definition. Set this to point to your content +# server. +# +vcl 4.0; +import directors; +import std; +# +# Generic probe used by all the backends +probe healthcheck { + .url = "/index.php"; + .interval = 250ms; + .timeout = 10s; + .window = 1; + .threshold = 1; +} + +# +# Application backends +# They are also a fallback for the static content +# +backend a5_dev { + .host = "127.0.0.1"; + .port = "80"; + .probe = healthcheck; + .connect_timeout = {{ varnish_static_c_timeout }}; + .first_byte_timeout = {{ varnish_static_first_byte_timeout }}; + .between_bytes_timeout = {{ varnish_static_between_bytes_timeout }}; + .max_connections = {{ varnish_static_max_connections }}; +} + +sub vcl_init { + new a5dev_cluster = directors.hash(); + a5dev_cluster.add_backend(a5_dev, 1.0); + + new a5admin_cluster = directors.fallback(); + a5admin_cluster.add_backend(a5_dev); +} + + +# Respond to incoming requests. 
+sub vcl_recv { + # Add a unique header containing the client address + # NB: it's the default, no need to explicitly add the X-Forwarded-For header +# unset req.http.X-Forwarded-For; +# set req.http.X-Forwarded-For = client.ip; + if (req.url ~ "^/backend/" || req.url ~ "^/admin(.*)" || req.url ~ "(?i)\.gupld" ) { + set req.backend_hint = a5admin_cluster.backend(); + return(pipe); + } + if (req.url ~ "^/cloud-assets/" && !req.url ~ "(?i)\.nocache\.(.*)" ) { + set req.backend_hint = a5dev_cluster.backend(client.identity); + unset req.http.Cookie; + } + else { + set req.backend_hint = a5dev_cluster.backend(client.identity); + } + + # Always cache the following file types for all users. + if (req.url ~ "(?i)\.(avi|mpeg|webm|vorbis|vob|mp4|divx|mp3|flac|ogg|ogv|png|gif|jpeg|jpg|ico|swf|css|js|html|htm)(\?[a-z0-9]+)?$" && !req.url ~ "(?i)\.nocache\.(.*)") { + unset req.http.Cookie; +# return(hash); + } + # Handle compression correctly. Different browsers send different + # "Accept-Encoding" headers, even though they mostly all support the same + # compression mechanisms. By consolidating these compression headers into + # a consistent format, we can reduce the size of the cache and get more hits. + # @see: http:// varnish.projects.linpro.no/wiki/FAQ/Compression + if (req.http.Accept-Encoding) { + if (req.http.Accept-Encoding ~ "gzip") { + # If the browser supports it, we'll use gzip. + set req.http.Accept-Encoding = "gzip"; + } + else if (req.http.Accept-Encoding ~ "deflate") { + # Next, try deflate if it is supported. + set req.http.Accept-Encoding = "deflate"; + } + else { + # Unknown algorithm. Remove it and send unencoded. + unset req.http.Accept-Encoding; + } + } +} + +# Code determining what to do when serving items from the backend servers. 
+sub vcl_backend_response { + if (beresp.http.X-No-Cache) { + set beresp.uncacheable = true; + set beresp.ttl = 120s; + return (deliver); + } + set beresp.http.X-Backend = beresp.backend.name; +# Don't cache all errors + if(beresp.status >= 300 && beresp.status <= 399) { + set beresp.ttl = 5m; + } + if(beresp.status >= 399 && beresp.status <= 403) { + set beresp.uncacheable = true; + set beresp.ttl = 10s; + return(deliver); + } + if (beresp.status == 404) { + set beresp.ttl = 30m; + } + if (beresp.status >= 405) { + set beresp.uncacheable = true; + set beresp.ttl = 0s; + return(deliver); + } + if (beresp.http.Content-Length && beresp.http.Content-Length ~ "^[0-9]$") { + #log "TooSmall: Pass on ( " req.url " ) small objects: " beresp.http.Content-Length ; + set beresp.uncacheable = true; + unset beresp.http.expires; + set beresp.ttl = 0s; + return(deliver); + } + if(bereq.url == "/robots.txt") { + # Robots.txt is updated rarely and should be cached for a lot of time + # Ban manually as required + set beresp.ttl = 1d; + } + if ((bereq.url ~ "^/cloud-assets/" || bereq.url ~ "(?i)\.(avi|mpeg|webm|vorbis|vob|mp4|divx|mp3|flac|ogg|ogv|png|gif|jpeg|jpg|ico|swf|css|js|html|htm)(\?[a-z0-9]+)?$") && !bereq.url ~ "(?i)\.nocache\.(.*)") { + unset beresp.http.expires; + unset beresp.http.set-cookie; + /* Set the clients TTL on this object */ + set beresp.http.cache-control = "max-age=604800"; + /* marker for vcl_deliver to reset Age: */ + set beresp.http.magicmarker = "1"; + set beresp.ttl = 6w; + } + set beresp.grace = 6h; +} + +sub vcl_hit { + if (obj.ttl >= 0s) { + // A pure unadultered hit, deliver it + return (deliver); + } + if (!std.healthy(req.backend_hint) && (obj.ttl + obj.grace > 0s)) { + return (deliver); + } else { + return (fetch); + } +} + +sub vcl_deliver { + if (resp.http.magicmarker) { + /* Remove the magic marker */ + unset resp.http.magicmarker; + + /* By definition we have a fresh object */ + set resp.http.age = "0"; + } +} + +sub vcl_pipe { + # 
http://www.varnish-cache.org/ticket/451 + # This forces every pipe request to be the first one. + set bereq.http.connection = "close"; +} diff --git a/varnish-cache/templates/varnish.params.j2 b/varnish-cache/templates/varnish.params.j2 new file mode 100644 index 00000000..6de1e244 --- /dev/null +++ b/varnish-cache/templates/varnish.params.j2 @@ -0,0 +1,84 @@ +# Configuration file for varnish +# +# /etc/init.d/varnish expects the variables $DAEMON_OPTS, $NFILES and $MEMLOCK +# to be set from this shell script fragment. +# +# Set this to 1 to make systemd reload try to switch vcl without restart. +RELOAD_VCL=1 + +# Maximum number of open files (for ulimit -n) +NFILES=131072 + +# Maximum locked memory size (for ulimit -l) +# Used for locking the shared memory log in memory. If you increase log size, +# you need to increase this number as well +MEMLOCK=82000 + +# Default varnish instance name is the local nodename. Can be overridden with +# the -n switch, to have more instances on a single server. +INSTANCE=$(uname -n) + + +## Alternative 3, Advanced configuration +# +# See varnishd(1) for more information. +# +# # Main configuration file. You probably want to change it :) +VARNISH_VCL_CONF=/etc/varnish/default.vcl +# +# # Default address and port to bind to +# # Blank address means all IPv4 and IPv6 interfaces, otherwise specify +# # a host name, an IPv4 dotted quad, or an IPv6 address in brackets. 
+# VARNISH_LISTEN_ADDRESS=
+VARNISH_LISTEN_PORT={{ varnish_listen_port }}
+#
+# # Telnet admin interface listen address and port
+VARNISH_ADMIN_LISTEN_ADDRESS=127.0.0.1
+VARNISH_ADMIN_LISTEN_PORT=6082
+#
+# Shared secret file for admin interface
+VARNISH_SECRET_FILE=/etc/varnish/secret
+
+# # The minimum number of worker threads to start
+VARNISH_MIN_THREADS=2
+#
+# # The Maximum number of worker threads to start
+VARNISH_MAX_THREADS=500
+#
+# # Idle timeout for worker threads
+VARNISH_THREAD_TIMEOUT=120
+#
+# # Cache file location
+VARNISH_STORAGE_FILE={{ varnish_storage_file }}
+#
+# # Cache file size: in bytes, optionally using k / M / G / T suffix,
+# # or in percentage of available disk space using the % suffix.
+VARNISH_STORAGE_SIZE={{ varnish_storage_size }}
+#
+#
+# # Backend storage specification
+VARNISH_STORAGE="file,{{ varnish_storage_file }},{{ varnish_storage_size }}"
+#
+# # Default TTL used when the backend does not specify one
+VARNISH_TTL={{ varnish_ttl }}
+#
+# User and group for the varnishd worker processes
+VARNISH_USER={{ varnish_user }}
+VARNISH_GROUP={{ varnish_group }}
+# NOTE(review): the next comment line appears truncated in the original
+# (it reads like the tail of "if you change the settings above, make …").
+# # sure you update this section, too.
+# DAEMON_OPTS="-a ${VARNISH_LISTEN_ADDRESS}:${VARNISH_LISTEN_PORT} \
+#              -f ${VARNISH_VCL_CONF} \
+#              -T ${VARNISH_ADMIN_LISTEN_ADDRESS}:${VARNISH_ADMIN_LISTEN_PORT} \
+#              -t ${VARNISH_TTL} \
+#              -w ${VARNISH_MIN_THREADS},${VARNISH_MAX_THREADS},${VARNISH_THREAD_TIMEOUT} \
+#              -S ${VARNISH_SECRET_FILE} \
+#              -s ${VARNISH_STORAGE} \
+#              -p sess_workspace=262144 \
+#              -p default_keep=${VARNISH_TTL} \
+#              -p sess_timeout=360 \
+#              -p thread_pools=1 \
+#              -p thread_pool_min=200 -p thread_pool_max=4000 \
+#              -p thread_pool_add_delay=2 -p session_linger=100 \
+#              -s file,{{ varnish_ram_cache_size }}M
+# "
+
diff --git a/vsftpd/defaults/main.yml b/vsftpd/defaults/main.yml
new file mode 100644
index 00000000..16fc3000
--- /dev/null
+++ b/vsftpd/defaults/main.yml
@@ -0,0 +1,16 @@
+---
+# The YES/NO values are the literal strings vsftpd expects in its config
+# file, so they are quoted to keep YAML from coercing them to booleans.
+vsftpd_anonymous_enable: "NO"
+vsftpd_local_enable: "YES"
+vsftpd_write_enable: "YES"
+vsftpd_local_umask: "077"
+vsftpd_dirmessage_enable: "YES"
+vsftpd_connect_from_port_20: "YES"
+vsftpd_ls_recurse_enable: "NO"
+vsftpd_local_root: /dev/null
+vsftpd_pasv_min_port: 49152
+vsftpd_pasv_max_port: 65534
+vsftpd_chroot_list_enable: "YES"
+vsftpd_chroot_list_file: /etc/vsftpd.chroot_list
+# FIX: canonical lowercase YAML boolean (was 'False').
+vsftpd_iptables_rules: false
+vsftpd_iptables_allowed_hosts:
+  - 0.0.0.0/0
diff --git a/vsftpd/handlers/main.yml b/vsftpd/handlers/main.yml
new file mode 100644
index 00000000..6abae0b6
--- /dev/null
+++ b/vsftpd/handlers/main.yml
@@ -0,0 +1,12 @@
+---
+- name: Start the vsftpd server
+  service: name=vsftpd state=started enabled=yes
+
+- name: Stop the vsftpd server
+  service: name=vsftpd state=stopped
+
+- name: Restart the vsftpd server
+  service: name=vsftpd state=restarted
+
+- name: Reload the vsftpd server
+  service: name=vsftpd state=reloaded
diff --git a/vsftpd/tasks/main.yml b/vsftpd/tasks/main.yml
new file mode 100644
index 00000000..9a9783f4
--- /dev/null
+++ b/vsftpd/tasks/main.yml
@@ -0,0 +1,14 @@
+---
+- name: Install the vsftpd package
+  # FIX: 'state=installed' is a deprecated alias; the canonical value is
+  # 'present' (same behaviour).
+  apt: pkg=vsftpd state=present
+  tags:
+    - vsftpd
+    - ftp
+
+- name: Install the vsftpd configuration file
+  template: src=vsftpd.conf.j2 dest=/etc/vsftpd.conf mode=0444 owner=root group=root
+  notify: Restart the vsftpd server
+  tags:
+    - vsftpd
+    - ftp
+
diff --git a/vsftpd/templates/vsftpd.conf.j2 b/vsftpd/templates/vsftpd.conf.j2
new file mode 100644
index 00000000..ebbbef14
--- /dev/null
+++ b/vsftpd/templates/vsftpd.conf.j2
@@ -0,0 +1,129 @@
+#
+# Run standalone? vsftpd can run either from an inetd or as a standalone
+# daemon started from an initscript.
+listen=YES
+#
+# Run standalone with IPv6?
+# Like the listen parameter, except vsftpd will listen on an IPv6 socket
+# instead of an IPv4 one. This parameter and the listen parameter are mutually
+# exclusive.
+#listen_ipv6=YES
+#
+# Allow anonymous FTP? (Beware - allowed by default if you comment this out).
+anonymous_enable={{ vsftpd_anonymous_enable }}
+#
+# Uncomment this to allow local users to log in.
+local_enable={{ vsftpd_local_enable }}
+#
+# Uncomment this to enable any form of FTP write command.
+write_enable={{ vsftpd_write_enable }}
+#
+# Default umask for local users is 077. You may wish to change this to 022,
+# if your users expect that (022 is used by most other ftpd's)
+local_umask={{ vsftpd_local_umask }}
+#
+# Uncomment this to allow the anonymous FTP user to upload files. This only
+# has an effect if the above global write enable is activated. Also, you will
+# obviously need to create a directory writable by the FTP user.
+#anon_upload_enable=YES
+#
+# Uncomment this if you want the anonymous FTP user to be able to create
+# new directories.
+#anon_mkdir_write_enable=YES
+#
+# Activate directory messages - messages given to remote users when they
+# go into a certain directory.
+dirmessage_enable={{ vsftpd_dirmessage_enable }}
+#
+# If enabled, vsftpd will display directory listings with the time
+# in your local time zone. The default is to display GMT. The
+# times returned by the MDTM FTP command are also affected by this
+# option.
+use_localtime=YES
+#
+# Activate logging of uploads/downloads.
+xferlog_enable=YES
+#
+# Make sure PORT transfer connections originate from port 20 (ftp-data).
+connect_from_port_20={{ vsftpd_connect_from_port_20 }}
+#
+# If you want, you can arrange for uploaded anonymous files to be owned by
+# a different user. Note! Using "root" for uploaded files is not
+# recommended!
+#chown_uploads=YES
+#chown_username=whoever
+#
+# You may override where the log file goes if you like. The default is shown
+# below.
+#xferlog_file=/var/log/vsftpd.log
+#
+# If you want, you can have your log file in standard ftpd xferlog format.
+# Note that the default log file location is /var/log/xferlog in this case.
+#xferlog_std_format=YES
+#
+# You may change the default value for timing out an idle session.
+#idle_session_timeout=600
+#
+# You may change the default value for timing out a data connection.
+#data_connection_timeout=120
+#
+# It is recommended that you define on your system a unique user which the
+# ftp server can use as a totally isolated and unprivileged user.
+#nopriv_user=ftpsecure
+#
+# Enable this and the server will recognise asynchronous ABOR requests. Not
+# recommended for security (the code is non-trivial). Not enabling it,
+# however, may confuse older FTP clients.
+#async_abor_enable=YES
+#
+# By default the server will pretend to allow ASCII mode but in fact ignore
+# the request. Turn on the below options to have the server actually do ASCII
+# mangling on files when in ASCII mode.
+# Beware that on some FTP servers, ASCII support allows a denial of service
+# attack (DoS) via the command "SIZE /big/file" in ASCII mode. vsftpd
+# predicted this attack and has always been safe, reporting the size of the
+# raw file.
+# ASCII mangling is a horrible feature of the protocol.
+#ascii_upload_enable=YES
+#ascii_download_enable=YES
+#
+# You may fully customise the login banner string:
+#ftpd_banner=Welcome to blah FTP service.
+#
+# You may specify a file of disallowed anonymous e-mail addresses. Apparently
+# useful for combatting certain DoS attacks.
+#deny_email_enable=YES
+# (default follows)
+#banned_email_file=/etc/vsftpd.banned_emails
+#
+
+# You may activate the "-R" option to the builtin ls. This is disabled by
+# default to avoid remote users being able to cause excessive I/O on large
+# sites. However, some broken FTP clients such as "ncftp" and "mirror" assume
+# the presence of the "-R" option, so there is a strong case for enabling it.
+ls_recurse_enable={{ vsftpd_ls_recurse_enable }}
+#
+# Customization
+#
+# Some of vsftpd's settings don't fit the filesystem layout by
+# default.
+#
+# This option should be the name of a directory which is empty. Also, the
+# directory should not be writable by the ftp user. This directory is used
+# as a secure chroot() jail at times vsftpd does not require filesystem
+# access.
+secure_chroot_dir=/var/run/vsftpd/empty
+#
+# This string is the name of the PAM service vsftpd will use.
+pam_service_name=vsftpd
+#
+# This option specifies the location of the RSA certificate to use for SSL
+# encrypted connections.
+rsa_cert_file=/etc/ssl/private/vsftpd.pem
+
+
+local_root={{ vsftpd_local_root }}
+pasv_min_port={{ vsftpd_pasv_min_port }}
+pasv_max_port={{ vsftpd_pasv_max_port }}
+chroot_list_enable={{ vsftpd_chroot_list_enable }}
+chroot_list_file={{ vsftpd_chroot_list_file }}
diff --git a/yii/defaults/main.yml b/yii/defaults/main.yml
new file mode 100644
index 00000000..1b2160e4
--- /dev/null
+++ b/yii/defaults/main.yml
@@ -0,0 +1,37 @@
+---
+#
+# Note: in the index.php directory, the path to the yii distribution will be something like
+# $yii='{{ yii_install_dir }}/yii/framework/yii.php';
+#
+yii_version: 1.1.16
+yii_release: bca042
+yii_installs:
+  - { version: '{{ yii_version }}', release: '{{ yii_release }}' }
+
+yii_download_url: 'https://github.com/yiisoft/yii/releases/download/{{ yii_version }}/yii-{{ yii_version }}.{{ yii_release }}.tar.gz'
+
+yii_install_dir: '/opt'
+yii_framework_dir: '{{ yii_install_dir }}/yii-{{ yii_version }}'
+
+yii_php_modules:
+  - php-xml-dtd
+  - php-xml-parser
+  - php-xml-serializer
+  - php5-imagick
+  - php5-memcache
+  - php5-xcache
+  - php5-gd
+  - php5-mcrypt
+  - libpcre
+
+yii_php_db_server_modules:
+  - php5-pgsql
+  - php5-mysql
+  - php5-mysqlnd
+
+yii_unneeded_files:
+  - CHANGELOG
+  - README
+  - UPGRADE
+  - demos
+  - requirements
diff --git a/yii/tasks/main.yml b/yii/tasks/main.yml
new file mode 100644
index 00000000..ac883819
--- /dev/null
+++ b/yii/tasks/main.yml
@@ -0,0 +1,44 @@
+---
+- name: Create the yii installation directory
+  file: dest={{ yii_install_dir }} state=directory owner=root group=root mode=0755
+  tags: [ 'yii', 'yii-framework' ]
+
+- name: Get the yii distribution file
+  get_url: url={{ yii_download_url }} dest={{ yii_install_dir }}/yii-{{ yii_version }}.{{ yii_release }}.tar.gz mode=0400
+  tags: [ 'yii', 'yii-framework' ]
+
+- name: Unpack the yii distribution file
+  # FIX: src previously hard-coded /opt while the download task above uses
+  # yii_install_dir; the role broke whenever yii_install_dir was overridden.
+  unarchive: src={{ yii_install_dir }}/yii-{{ yii_version }}.{{ yii_release }}.tar.gz dest={{ yii_install_dir }} copy=no
+  args:
+    creates: '{{ yii_install_dir }}/yii-{{ yii_version }}.{{ yii_release }}/framework/yii.php'
+  register: yii_unpack
+  tags: [ 'yii', 'yii-framework' ]
+
+- name: Fix the permissions on the yii framework directory
+  command: chown -R root:root {{ yii_install_dir }}/yii-{{ yii_version }}.{{ yii_release }}
+  when: ( yii_unpack | changed )
+  tags: [ 'yii', 'yii-framework' ]
+
+- name: Create a couple of links to the running version
+  file: src={{ yii_install_dir }}/yii-{{ yii_version }}.{{ yii_release }} dest={{ item }} state=link
+  with_items:
+    - '{{ yii_framework_dir }}'
+    - '{{ yii_install_dir }}/yii'
+  tags: [ 'yii', 'yii-framework' ]
+
+- name: Remove the unneeded files
+  file: dest={{ yii_install_dir }}/yii-{{ yii_version }}.{{ yii_release }}/{{ item }} state=absent
+  with_items: yii_unneeded_files
+  tags: [ 'yii', 'yii-framework' ]
+
+- name: Install the php required modules
+  apt: name={{ item }} state=present
+  with_items: yii_php_modules
+  notify: Reload php-fpm
+  tags: [ 'yii', 'yii-framework' ]
+
+- name: Install the php modules to access the db servers
+  apt: name={{ item }} state=present
+  with_items: yii_php_db_server_modules
+  notify: Reload php-fpm
+  tags: [ 'yii', 'yii-framework' ]