ch13
author Lorin Hochstein <lorin.hochstein@sendgrid.com>
Tue, 10 Feb 2015 00:13:53 +0000 (19:13 -0500)
committer Lorin Hochstein <lorin.hochstein@sendgrid.com>
Tue, 10 Feb 2015 00:13:53 +0000 (19:13 -0500)
22 files changed:
ch13/playbooks/README.md [new file with mode: 0644]
ch13/playbooks/ansible.cfg [new file with mode: 0644]
ch13/playbooks/certs/Dockerfile [new file with mode: 0644]
ch13/playbooks/certs/Makefile [new file with mode: 0644]
ch13/playbooks/inventory [new file with mode: 0644]
ch13/playbooks/library/_docker_image.py [new file with mode: 0644]
ch13/playbooks/library/docker.py [new file with mode: 0644]
ch13/playbooks/memcached/Dockerfile [new file with mode: 0644]
ch13/playbooks/memcached/Makefile [new file with mode: 0644]
ch13/playbooks/mezzanine.env [new file with mode: 0644]
ch13/playbooks/mezzanine/Dockerfile [new file with mode: 0644]
ch13/playbooks/mezzanine/Makefile [new file with mode: 0644]
ch13/playbooks/mezzanine/ansible/files/gunicorn.conf.py [new file with mode: 0644]
ch13/playbooks/mezzanine/ansible/files/local_settings.py [new file with mode: 0644]
ch13/playbooks/mezzanine/ansible/files/scripts/setadmin.py [new file with mode: 0755]
ch13/playbooks/mezzanine/ansible/files/scripts/setsite.py [new file with mode: 0755]
ch13/playbooks/mezzanine/ansible/mezzanine-container.yml [new file with mode: 0644]
ch13/playbooks/nginx/Dockerfile [new file with mode: 0644]
ch13/playbooks/nginx/Makefile [new file with mode: 0644]
ch13/playbooks/nginx/nginx.conf [new file with mode: 0644]
ch13/playbooks/run-mezzanine.yml [new file with mode: 0755]
ch13/playbooks/secrets.yml [new file with mode: 0644]

diff --git a/ch13/playbooks/README.md b/ch13/playbooks/README.md
new file mode 100644 (file)
index 0000000..2345292
--- /dev/null
@@ -0,0 +1,3 @@
+# Mezzanine Docker containers with Ansible
+
+Examples on how to use Ansible to create Docker containers to deploy Mezzanine
diff --git a/ch13/playbooks/ansible.cfg b/ch13/playbooks/ansible.cfg
new file mode 100644 (file)
index 0000000..d1b3c4c
--- /dev/null
@@ -0,0 +1,3 @@
+[defaults]
+hostfile = inventory
+gathering = explicit
diff --git a/ch13/playbooks/certs/Dockerfile b/ch13/playbooks/certs/Dockerfile
new file mode 100644 (file)
index 0000000..5ff772a
--- /dev/null
@@ -0,0 +1,15 @@
+FROM ubuntu:trusty
+MAINTAINER lorin@ansiblebook.com
+
+# Create self-signed cert for 192.168.59.103
+RUN apt-get update
+RUN apt-get install -y openssl
+
+RUN mkdir /certs
+
+WORKDIR /certs
+
+RUN openssl req -new -x509 -nodes -out nginx.crt \
+   -keyout nginx.key -subj '/CN=192.168.59.103.xip.io' -days 3650
+
+VOLUME /certs
diff --git a/ch13/playbooks/certs/Makefile b/ch13/playbooks/certs/Makefile
new file mode 100644 (file)
index 0000000..80ae2cb
--- /dev/null
@@ -0,0 +1,10 @@
+.PHONY: image bash
+
+IMAGE=lorin/certs
+
+image:
+       docker build -t $(IMAGE) .
+
+bash:
+       docker run -ti $(IMAGE) /bin/bash
+
diff --git a/ch13/playbooks/inventory b/ch13/playbooks/inventory
new file mode 100644 (file)
index 0000000..2302eda
--- /dev/null
@@ -0,0 +1 @@
+localhost ansible_connection=local
diff --git a/ch13/playbooks/library/_docker_image.py b/ch13/playbooks/library/_docker_image.py
new file mode 100644 (file)
index 0000000..21d3335
--- /dev/null
@@ -0,0 +1,261 @@
+#!/usr/bin/python
+#
+
+# (c) 2014, Pavel Antonov <antonov@adwz.ru>
+#
+# This file is part of Ansible
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software.  If not, see <http://www.gnu.org/licenses/>.
+
+######################################################################
+
+DOCUMENTATION = '''
+---
+module: docker_image
+deprecated: "functions are being rolled into the 'docker' module"
+author: Pavel Antonov
+version_added: "1.5"
+short_description: manage docker images
+description:
+     - Create, check and remove docker images
+     - If DOCKER_HOST is set, will get connection info from environment variables
+options:
+  path:
+    description:
+       - Path to directory with Dockerfile
+    required: false
+    default: null
+    aliases: []
+  name:
+    description:
+       - Image name to work with
+    required: true
+    default: null
+    aliases: []
+  tag:
+    description:
+       - Image tag to work with
+    required: false
+    default: "latest"
+    aliases: []
+  nocache:
+    description:
+      - Do not use cache with building
+    required: false
+    default: false
+    aliases: []
+  docker_url:
+    description:
+      - URL of docker host to issue commands to
+    required: false
+    default: unix://var/run/docker.sock
+    aliases: []
+  state:
+    description:
+      - Set the state of the image
+    required: false
+    default: present
+    choices: [ "present", "absent", "build" ]
+    aliases: []
+  timeout:
+    description:
+      - Set image operation timeout
+    required: false
+    default: 600
+    aliases: []
+requirements: [ "docker-py" ]
+'''
+
+EXAMPLES = '''
+Build docker image if required. Path should contain a Dockerfile to build the image:
+
+- hosts: web
+  sudo: yes
+  tasks:
+  - name: check or build image
+    docker_image: path="/path/to/build/dir" name="my/app" state=present
+
+Build new version of image:
+
+- hosts: web
+  sudo: yes
+  tasks:
+  - name: check or build image
+    docker_image: path="/path/to/build/dir" name="my/app" state=build
+
+Remove image from local docker storage:
+
+- hosts: web
+  sudo: yes
+  tasks:
+  - name: remove image
+    docker_image: name="my/app" state=absent
+
+'''
+
+try:
+    import os
+    import sys
+    import re
+    import json
+    import docker.client
+    import docker.utils
+    from requests.exceptions import *
+    from urlparse import urlparse
+except ImportError, e:
+    print "failed=True msg='failed to import python module: %s'" % e
+    sys.exit(1)
+
+try:
+    from docker.errors import APIError as DockerAPIError
+except ImportError:
+    from docker.client import APIError as DockerAPIError
+
+class DockerImageManager:
+
+    def __init__(self, module):
+        self.module = module
+        self.path = self.module.params.get('path')
+        self.name = self.module.params.get('name')
+        self.tag = self.module.params.get('tag')
+        self.nocache = self.module.params.get('nocache')
+        docker_url = urlparse(module.params.get('docker_url'))
+        if 'DOCKER_HOST' in os.environ:
+            args = docker.utils.kwargs_from_env(assert_hostname=False)
+            args['timeout'] = module.params.get('timeout')
+            self.client = docker.Client(**args)
+        else:
+            self.client = docker.Client(base_url=docker_url.geturl(), timeout=module.params.get('timeout'))
+        self.changed = False
+        self.log = []
+        self.error_msg = None
+
+    def get_log(self, as_string=True):
+        return "".join(self.log) if as_string else self.log
+
+    def build(self):
+        stream = self.client.build(self.path, tag=':'.join([self.name, self.tag]), nocache=self.nocache, rm=True, stream=True)
+        success_search = r'Successfully built ([0-9a-f]+)'
+        image_id = None
+        self.changed = True
+
+        for chunk in stream:
+            if not chunk:
+                continue
+
+            try:
+                chunk_json = json.loads(chunk)
+            except ValueError:
+                continue
+
+            if 'error' in chunk_json:
+                self.error_msg = chunk_json['error']
+                return None
+
+            if 'stream' in chunk_json:
+                output = chunk_json['stream']
+                self.log.append(output)
+                match = re.search(success_search, output)
+                if match:
+                    image_id = match.group(1)
+
+        # Just in case we skipped evaluating the JSON returned from build
+        # during every iteration, add an error if the image_id was never
+        # populated
+        if not image_id:
+            self.error_msg = 'Unknown error encountered'
+
+        return image_id
+
+    def has_changed(self):
+        return self.changed
+
+    def get_images(self):
+        filtered_images = []
+        images = self.client.images()
+        for i in images:
+            # Docker-py version >= 0.3 (Docker API >= 1.8)
+            if 'RepoTags' in i:
+                repotag = ':'.join([self.name, self.tag])
+                if not self.name or repotag in i['RepoTags']:
+                    filtered_images.append(i)
+            # Docker-py version < 0.3 (Docker API < 1.8)
+            elif (not self.name or self.name == i['Repository']) and (not self.tag or self.tag == i['Tag']):
+                filtered_images.append(i)
+        return filtered_images
+
+    def remove_images(self):
+        images = self.get_images()
+        for i in images:
+            try:
+                self.client.remove_image(i['Id'])
+                self.changed = True
+            except DockerAPIError as e:
+                # image can be removed by docker if not used
+                pass
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec = dict(
+            path            = dict(required=False, default=None),
+            name            = dict(required=True),
+            tag             = dict(required=False, default="latest"),
+            nocache         = dict(default=False, type='bool'),
+            state           = dict(default='present', choices=['absent', 'present', 'build']),
+            docker_url      = dict(default='unix://var/run/docker.sock'),
+            timeout         = dict(default=600, type='int'),
+        )
+    )
+
+    try:
+        manager = DockerImageManager(module)
+        state = module.params.get('state')
+        failed = False
+        image_id = None
+        msg = ''
+        do_build = False
+
+        # build image if not exists
+        if state == "present":
+            images = manager.get_images()
+            if len(images) == 0:
+                do_build = True
+        # build image
+        elif state == "build":
+            do_build = True
+        # remove image or images
+        elif state == "absent":
+            manager.remove_images()
+
+        if do_build:
+            image_id = manager.build()
+            if image_id:
+                msg = "Image built: %s" % image_id
+            else:
+                failed = True
+                msg = "Error: %s\nLog:%s" % (manager.error_msg, manager.get_log())
+
+        module.exit_json(failed=failed, changed=manager.has_changed(), msg=msg, image_id=image_id)
+
+    except DockerAPIError as e:
+        module.exit_json(failed=True, changed=manager.has_changed(), msg="Docker API error: " + e.explanation)
+
+    except RequestException as e:
+        module.exit_json(failed=True, changed=manager.has_changed(), msg=repr(e))
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+main()
diff --git a/ch13/playbooks/library/docker.py b/ch13/playbooks/library/docker.py
new file mode 100644 (file)
index 0000000..5c47899
--- /dev/null
@@ -0,0 +1,1008 @@
+#!/usr/bin/python
+
+# (c) 2013, Cove Schneider
+# (c) 2014, Joshua Conner <joshua.conner@gmail.com>
+# (c) 2014, Pavel Antonov <antonov@adwz.ru>
+#
+# This file is part of Ansible,
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+
+######################################################################
+
+DOCUMENTATION = '''
+---
+module: docker
+version_added: "1.4"
+short_description: manage docker containers
+description:
+     - Manage the life cycle of docker containers.
+     - If DOCKER_HOST is set, will get connection info from environment variables
+options:
+  count:
+    description:
+      - Set number of containers to run
+    required: False
+    default: 1
+    aliases: []
+  image:
+    description:
+       - Set container image to use
+    required: true
+    default: null
+    aliases: []
+  command:
+    description:
+       - Set command to run in a container on startup
+    required: false
+    default: null
+    aliases: []
+  name:
+    description:
+       - Set name for container (used to find single container or to provide links)
+    required: false
+    default: null
+    aliases: []
+    version_added: "1.5"
+  ports:
+    description:
+      - Set private to public port mapping specification using docker CLI-style syntax [([<host_interface>:[host_port]])|(<host_port>):]<container_port>[/udp]
+    required: false
+    default: null
+    aliases: []
+    version_added: "1.5"
+  expose:
+    description:
+      - Set container ports to expose for port mappings or links. (If the port is already exposed using EXPOSE in a Dockerfile, you don't need to expose it again.)
+    required: false
+    default: null
+    aliases: []
+    version_added: "1.5"
+  publish_all_ports:
+    description:
+      - Publish all exposed ports to the host interfaces
+    required: false
+    default: false
+    aliases: []
+    version_added: "1.5"
+  volumes:
+    description:
+      - Set volume(s) to mount on the container
+    required: false
+    default: null
+    aliases: []
+  volumes_from:
+    description:
+      - Set shared volume(s) from another container
+    required: false
+    default: null
+    aliases: []
+  links:
+    description:
+      - Link container(s) to other container(s) (e.g. links=redis,postgresql:db)
+    required: false
+    default: null
+    aliases: []
+    version_added: "1.5"
+  memory_limit:
+    description:
+      - Set RAM allocated to container
+    required: false
+    default: null
+    aliases: []
+    # docker's own default is 256MB (duplicate `default` key removed)
+  docker_url:
+    description:
+      - URL of docker host to issue commands to
+    required: false
+    default: unix://var/run/docker.sock
+    aliases: []
+  docker_api_version:
+    description:
+      - Remote API version to use. This defaults to the current default as specified by docker-py.
+    required: false
+    default: docker-py default remote API version
+    aliases: []
+    version_added: "1.8"
+  username:
+    description:
+      - Set remote API username
+    required: false
+    default: null
+    aliases: []
+  password:
+    description:
+      - Set remote API password
+    required: false
+    default: null
+    aliases: []
+  email:
+    description:
+      - Set remote API email
+    required: false
+    default: null
+    aliases: []
+  hostname:
+    description:
+      - Set container hostname
+    required: false
+    default: null
+    aliases: []
+  env:
+    description:
+      - Set environment variables (e.g. env="PASSWORD=sEcRe7,WORKERS=4")
+    required: false
+    default: null
+    aliases: []
+  dns:
+    description:
+      - Set custom DNS servers for the container
+    required: false
+    default: null
+    aliases: []
+  detach:
+    description:
+      - Enable detached mode on start up, leaves container running in background
+    required: false
+    default: true
+    aliases: []
+  state:
+    description:
+      - Set the state of the container
+    required: false
+    default: present
+    choices: [ "present", "running", "stopped", "absent", "killed", "restarted" ]
+    aliases: []
+  privileged:
+    description:
+      - Set whether the container should run in privileged mode
+    required: false
+    default: false
+    aliases: []
+  lxc_conf:
+    description:
+      - LXC config parameters,  e.g. lxc.aa_profile:unconfined
+    required: false
+    default:
+    aliases: []
+  name:
+    description:
+      - Set the name of the container (cannot use with count)
+    required: false
+    default: null
+    aliases: []
+    version_added: "1.5"
+  stdin_open:
+    description:
+      - Keep stdin open
+    required: false
+    default: false
+    aliases: []
+    version_added: "1.6"
+  tty:
+    description:
+      - Allocate a pseudo-tty
+    required: false
+    default: false
+    aliases: []
+    version_added: "1.6"
+  net:
+    description:
+      - Set Network mode for the container (bridge, none, container:<name|id>, host). Requires docker >= 0.11.
+    required: false
+    default: false
+    aliases: []
+    version_added: "1.8"
+  registry:
+    description:
+      - The remote registry URL to use for pulling images.
+    required: false
+    default: ''
+    aliases: []
+    version_added: "1.8"
+  restart_policy:
+    description:
+      - Set the container restart policy
+    required: false
+    default: false
+    aliases: []
+    version_added: "1.9"
+  restart_policy_retry:
+    description:
+      - Set the retry limit for container restart policy
+    required: false
+    default: false
+    aliases: []
+    version_added: "1.9"
+  insecure_registry:
+    description:
+      - Use insecure private registry by HTTP instead of HTTPS (needed for docker-py >= 0.5.0).
+    required: false
+    default: false
+    aliases: []
+    version_added: "1.9"
+  wait:
+    description:
+      - Wait until process terminates.
+      - Fails on non-zero return code
+
+author: Cove Schneider, Joshua Conner, Pavel Antonov
+requirements: [ "docker-py >= 0.3.0", "docker >= 0.10.0" ]
+'''
+
+EXAMPLES = '''
+Start one docker container running tomcat in each host of the web group and bind tomcat's listening port to 8080
+on the host:
+
+- hosts: web
+  sudo: yes
+  tasks:
+  - name: run tomcat servers
+    docker: image=centos command="service tomcat6 start" ports=8080
+
+The tomcat server's port is NAT'ed to a dynamic port on the host, but you can determine which port the server was
+mapped to using docker_containers:
+
+- hosts: web
+  sudo: yes
+  tasks:
+  - name: run tomcat servers
+    docker: image=centos command="service tomcat6 start" ports=8080 count=5
+  - name: Display IP address and port mappings for containers
+    debug: msg={{inventory_hostname}}:{{item['HostConfig']['PortBindings']['8080/tcp'][0]['HostPort']}}
+    with_items: docker_containers
+
+Just as in the previous example, but iterates over the list of docker containers with a sequence:
+
+- hosts: web
+  sudo: yes
+  vars:
+    start_containers_count: 5
+  tasks:
+  - name: run tomcat servers
+    docker: image=centos command="service tomcat6 start" ports=8080 count={{start_containers_count}}
+  - name: Display IP address and port mappings for containers
+    debug: msg="{{inventory_hostname}}:{{docker_containers[{{item}}]['HostConfig']['PortBindings']['8080/tcp'][0]['HostPort']}}"
+    with_sequence: start=0 end={{start_containers_count - 1}}
+
+Stop, remove all of the running tomcat containers and list the exit code from the stopped containers:
+
+- hosts: web
+  sudo: yes
+  tasks:
+  - name: stop tomcat servers
+    docker: image=centos command="service tomcat6 start" state=absent
+  - name: Display return codes from stopped containers
+    debug: msg="Returned {{inventory_hostname}}:{{item}}"
+    with_items: docker_containers
+
+Create a named container:
+
+- hosts: web
+  sudo: yes
+  tasks:
+  - name: run tomcat server
+    docker: image=centos name=tomcat command="service tomcat6 start" ports=8080
+
+Create multiple named containers:
+
+- hosts: web
+  sudo: yes
+  tasks:
+  - name: run tomcat servers
+    docker: image=centos name={{item}} command="service tomcat6 start" ports=8080
+    with_items:
+      - crookshank
+      - snowbell
+      - heathcliff
+      - felix
+      - sylvester
+
+Create containers named in a sequence:
+
+- hosts: web
+  sudo: yes
+  tasks:
+  - name: run tomcat servers
+    docker: image=centos name={{item}} command="service tomcat6 start" ports=8080
+    with_sequence: start=1 end=5 format=tomcat_%d.example.com
+
+Create two linked containers:
+
+- hosts: web
+  sudo: yes
+  tasks:
+  - name: ensure redis container is running
+    docker: image=crosbymichael/redis name=redis
+
+  - name: ensure redis_ambassador container is running
+    docker: image=svendowideit/ambassador ports=6379:6379 links=redis:redis name=redis_ambassador_ansible
+
+Create containers with options specified as key-value pairs and lists:
+
+- hosts: web
+  sudo: yes
+  tasks:
+  - docker:
+        image: namespace/image_name
+        links:
+          - postgresql:db
+          - redis:redis
+
+
+Create containers with options specified as strings and lists as comma-separated strings:
+
+- hosts: web
+  sudo: yes
+  tasks:
+  docker: image=namespace/image_name links=postgresql:db,redis:redis
+
+Create a container with no networking:
+
+- hosts: web
+  sudo: yes
+  tasks:
+  docker: image=namespace/image_name net=none
+
+'''
+
+HAS_DOCKER_PY = True
+
+import sys
+from urlparse import urlparse
+try:
+    import docker.client
+    import docker.utils
+    from requests.exceptions import *
+except ImportError, e:
+    HAS_DOCKER_PY = False
+
+if HAS_DOCKER_PY:
+    try:
+        from docker.errors import APIError as DockerAPIError
+    except ImportError:
+        from docker.client import APIError as DockerAPIError
+
+
+def _human_to_bytes(number):
+    suffixes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
+
+    if isinstance(number, int):
+        return number
+    if number[-1] == suffixes[0] and number[-2].isdigit():
+        return number[:-1]
+
+    i = 1
+    for each in suffixes[1:]:
+        if number[-len(each):] == suffixes[i]:
+            return int(number[:-len(each)]) * (1024 ** i)
+        i = i + 1
+
+    print "failed=True msg='Could not convert %s to integer'" % (number)
+    sys.exit(1)
+
+def _ansible_facts(container_list):
+    return {"docker_containers": container_list}
+
+def _docker_id_quirk(inspect):
+    # XXX: some quirk in docker
+    if 'ID' in inspect:
+        inspect['Id'] = inspect['ID']
+        del inspect['ID']
+    return inspect
+
+
+def get_split_image_tag(image):
+    # If image contains a host or org name, omit that from our check
+    if '/' in image:
+        registry, resource = image.rsplit('/', 1)
+    else:
+        registry, resource = None, image
+
+    # now we can determine if image has a tag
+    if ':' in resource:
+        resource, tag = resource.split(':', 1)
+        if registry:
+            resource = '/'.join((registry, resource))
+    else:
+        tag = "latest"
+        resource = image
+
+    return resource, tag
+
+def get_docker_py_versioninfo():
+    if hasattr(docker, '__version__'):
+        # a '__version__' attribute was added to the module but not until
+        # after 0.3.0 was pushed to pypi. If it's there, use it.
+        version = []
+        for part in docker.__version__.split('.'):
+            try:
+                version.append(int(part))
+            except ValueError:
+                for idx, char in enumerate(part):
+                    if not char.isdigit():
+                        nondigit = part[idx:]
+                        digit = part[:idx]
+                if digit:
+                    version.append(int(digit))
+                if nondigit:
+                    version.append(nondigit)
+    elif hasattr(docker.Client, '_get_raw_response_socket'):
+        # HACK: if '__version__' isn't there, we check for the existence of
+        # `_get_raw_response_socket` in the docker.Client class, which was
+        # added in 0.3.0
+        version = (0, 3, 0)
+    else:
+        # This is untrue but this module does not function with a version less
+        # than 0.3.0 so it's okay to lie here.
+        version = (0,)
+
+    return tuple(version)
+
+def check_dependencies(module):
+    """
+    Ensure `docker-py` >= 0.3.0 is installed, and call module.fail_json with a
+    helpful error message if it isn't.
+    """
+    if not HAS_DOCKER_PY:
+        module.fail_json(msg="`docker-py` doesn't seem to be installed, but is required for the Ansible Docker module.")
+    else:
+        versioninfo = get_docker_py_versioninfo()
+        if versioninfo < (0, 3, 0):
+            module.fail_json(msg="The Ansible Docker module requires `docker-py` >= 0.3.0.")
+
+
+class DockerManager(object):
+
+    counters = {'created':0, 'started':0, 'stopped':0, 'killed':0, 'removed':0, 'restarted':0, 'pull':0}
+    _capabilities = set()
+    # Map optional parameters to minimum (docker-py version, server APIVersion)
+    # docker-py version is a tuple of ints because we have to compare them
+    # server APIVersion is passed to a docker-py function that takes strings
+    _cap_ver_req = {
+            'dns': ((0, 3, 0), '1.10'),
+            'volumes_from': ((0, 3, 0), '1.10'),
+            'restart_policy': ((0, 5, 0), '1.14'),
+            # Clientside only
+            'insecure_registry': ((0, 5, 0), '0.0')
+            }
+
+    def __init__(self, module):
+        self.module = module
+
+        self.binds = None
+        self.volumes = None
+        if self.module.params.get('volumes'):
+            self.binds = {}
+            self.volumes = {}
+            vols = self.module.params.get('volumes')
+            for vol in vols:
+                parts = vol.split(":")
+                # regular volume
+                if len(parts) == 1:
+                    self.volumes[parts[0]] = {}
+                # host mount (e.g. /mnt:/tmp, bind mounts host's /tmp to /mnt in the container)
+                elif 2 <= len(parts) <= 3:
+                    # default to read-write
+                    ro = False
+                    # with supplied bind mode
+                    if len(parts) == 3:
+                        if parts[2] not in ['ro', 'rw']:
+                            self.module.fail_json(msg='bind mode needs to either be "ro" or "rw"')
+                        else:
+                            ro = parts[2] == 'ro'
+                    self.binds[parts[0]] = {'bind': parts[1], 'ro': ro }
+                else:
+                    self.module.fail_json(msg='volumes support 1 to 3 arguments')
+
+        self.lxc_conf = None
+        if self.module.params.get('lxc_conf'):
+            self.lxc_conf = []
+            options = self.module.params.get('lxc_conf')
+            for option in options:
+                parts = option.split(':')
+                self.lxc_conf.append({"Key": parts[0], "Value": parts[1]})
+
+        self.exposed_ports = None
+        if self.module.params.get('expose'):
+            self.exposed_ports = self.get_exposed_ports(self.module.params.get('expose'))
+
+        self.port_bindings = None
+        if self.module.params.get('ports'):
+            self.port_bindings = self.get_port_bindings(self.module.params.get('ports'))
+
+        self.links = None
+        if self.module.params.get('links'):
+            self.links = self.get_links(self.module.params.get('links'))
+
+        self.env = self.module.params.get('env', None)
+
+        # connect to docker server
+        docker_url = urlparse(module.params.get('docker_url'))
+        docker_api_version = module.params.get('docker_api_version')
+
+        # If DOCKER_HOST is set, use environment variables to configure client
+        if 'DOCKER_HOST' in os.environ:
+            self.client = docker.Client(**docker.utils.kwargs_from_env(assert_hostname=False))
+        else:
+            self.client = docker.Client(base_url=docker_url.geturl(), version=docker_api_version)
+
+        self.docker_py_versioninfo = get_docker_py_versioninfo()
+
+    def _check_capabilties(self):
+        """
+        Create a list of available capabilities
+        """
+        api_version = self.client.version()['ApiVersion']
+        for cap, req_vers in self._cap_ver_req.items():
+            if (self.docker_py_versioninfo >= req_vers[0] and
+                    docker.utils.compare_version(req_vers[1], api_version) >= 0):
+                self._capabilities.add(cap)
+
+    def ensure_capability(self, capability, fail=True):
+        """
+        Some of the functionality this ansible module implements are only
+        available in newer versions of docker.  Ensure that the capability
+        is available here.
+
+        If fail is set to False then return True or False depending on whether
+        we have the capability.  Otherwise, simply fail and exit the module if
+        we lack the capability.
+        """
+        if not self._capabilities:
+            self._check_capabilties()
+
+        if capability in self._capabilities:
+            return True
+
+        if not fail:
+            return False
+
+        api_version = self.client.version()['ApiVersion']
+        self.module.fail_json(msg='Specifying the `%s` parameter requires'
+                ' docker-py: %s, docker server apiversion %s; found'
+                ' docker-py: %s, server: %s' % (
+                    capability,
+                    '.'.join(self._cap_ver_req[capability][0]),
+                    self._cap_ver_req[capability][1],
+                    '.'.join(self.docker_py_versioninfo),
+                    api_version))
+
+    def get_links(self, links):
+        """
+        Parse the links passed, if a link is specified without an alias then just create the alias of the same name as the link
+        """
+        processed_links = {}
+
+        for link in links:
+            parsed_link = link.split(':', 1)
+            if(len(parsed_link) == 2):
+                processed_links[parsed_link[0]] = parsed_link[1]
+            else:
+                processed_links[parsed_link[0]] = parsed_link[0]
+
+        return processed_links
+
+
+    def get_exposed_ports(self, expose_list):
+        """
+        Parse the ports and protocols (TCP/UDP) to expose in the docker-py `create_container` call from the docker CLI-style syntax.
+        """
+        if expose_list:
+            exposed = []
+            for port in expose_list:
+                port = str(port).strip()
+                if port.endswith('/tcp') or port.endswith('/udp'):
+                    port_with_proto = tuple(port.split('/'))
+                else:
+                    # assume tcp protocol if not specified
+                    port_with_proto = (port, 'tcp')
+                exposed.append(port_with_proto)
+            return exposed
+        else:
+            return None
+
+
+    def get_port_bindings(self, ports):
+        """
+        Parse the `ports` string into a port bindings dict for the `start_container` call.
+        """
+        binds = {}
+        for port in ports:
+            # ports could potentially be an array like [80, 443], so we make sure they're strings
+            # before splitting
+            parts = str(port).split(':')  # NOTE(review): splitting on ':' breaks bare IPv6 host addresses
+            container_port = parts[-1]
+            if '/' not in container_port:
+                container_port = int(parts[-1])  # plain port -> int key; 'port/proto' stays a string key
+
+            p_len = len(parts)
+            if p_len == 1:
+                # Bind `container_port` of the container to a dynamically
+                # allocated TCP port on all available interfaces of the host
+                # machine.
+                bind = ('0.0.0.0',)
+            elif p_len == 2:
+                # Bind `container_port` of the container to port `parts[0]` on
+                # all available interfaces of the host machine.
+                bind = ('0.0.0.0', int(parts[0]))
+            elif p_len == 3:
+                # Bind `container_port` of the container to port `parts[1]` on
+                # IP `parts[0]` of the host machine. If `parts[1]` empty bind
+                # to a dynamically allocated port of IP `parts[0]`.
+                bind = (parts[0], int(parts[1])) if parts[1] else (parts[0],)
+
+            if container_port in binds:
+                old_bind = binds[container_port]
+                if isinstance(old_bind, list):
+                    # append to list if it already exists
+                    old_bind.append(bind)
+                else:
+                    # otherwise create list that contains the old and new binds
+                    binds[container_port] = [binds[container_port], bind]
+            else:
+                binds[container_port] = bind
+
+        return binds
+
+
+    def get_summary_counters_msg(self):  # render the change counters, e.g. "created 1 started 1 "
+        msg = ""
+        for k, v in self.counters.iteritems():
+            msg = msg + "%s %d " % (k, v)
+
+        return msg
+
+    def increment_counter(self, name):  # bump one change counter ('created', 'started', ...)
+        self.counters[name] = self.counters[name] + 1
+
+    def has_changed(self):  # True if any counter was incremented, i.e. this run changed state
+        for k, v in self.counters.iteritems():
+            if v > 0:
+                return True
+
+        return False
+
+    def get_inspect_containers(self, containers):  # full `docker inspect` details for each container
+        inspect = []
+        for i in containers:
+            details = self.client.inspect_container(i['Id'])
+            details = _docker_id_quirk(details)  # presumably normalizes the Id key; defined earlier in this module
+            inspect.append(details)
+
+        return inspect
+
+    def get_deployed_containers(self):
+        """determine which images/commands are running already"""
+        image = self.module.params.get('image')
+        command = self.module.params.get('command')
+        if command:
+            command = command.strip()
+        name = self.module.params.get('name')
+        if name and not name.startswith('/'):
+            name = '/' + name  # docker reports container names with a leading '/'
+        deployed = []
+
+        # if we weren't given a tag with the image, compare on the image name only,
+        # since docker returns the full image name (including any tag) in the container list.
+        image, tag = get_split_image_tag(image)
+
+        for i in self.client.containers(all=True):
+            running_image, running_tag = get_split_image_tag(i['Image'])
+            running_command = i['Command'].strip()
+
+            name_matches = False
+            if i["Names"]:
+                name_matches = (name and name in i['Names'])
+            image_matches = (running_image == image)
+            tag_matches = (not tag or running_tag == tag)
+            # if a container has an entrypoint, `command` will actually equal
+            # '{} {}'.format(entrypoint, command)
+            command_matches = (not command or running_command.endswith(command))
+
+            if name_matches or (name is None and image_matches and tag_matches and command_matches):
+                details = self.client.inspect_container(i['Id'])
+                details = _docker_id_quirk(details)
+                deployed.append(details)
+
+        return deployed
+
+    def get_running_containers(self):  # deployed containers whose State is Running (and not a 'Ghost')
+        running = []
+        for i in self.get_deployed_containers():
+            if i['State']['Running'] == True and i['State'].get('Ghost', False) == False:
+                running.append(i)
+
+        return running
+
+    def create_containers(self, count=1):  # create `count` containers; pulls the image once on failure
+        params = {'image':        self.module.params.get('image'),
+                  'command':      self.module.params.get('command'),
+                  'ports':        self.exposed_ports,
+                  'volumes':      self.volumes,
+                  'mem_limit':    _human_to_bytes(self.module.params.get('memory_limit')),
+                  'environment':  self.env,
+                  'hostname':     self.module.params.get('hostname'),
+                  'detach':       self.module.params.get('detach'),
+                  'name':         self.module.params.get('name'),
+                  'stdin_open':   self.module.params.get('stdin_open'),
+                  'tty':          self.module.params.get('tty'),
+                  'dns':          self.module.params.get('dns'),
+                  'host_config':  docker.utils.create_host_config(binds=self.binds),
+                  # Comment out volumes_from here or it throws an exception
+                  # 'volumes_from': self.module.params.get('volumes_from'),
+                  }
+
+        if params['dns'] is not None:
+            self.ensure_capability('dns')
+
+        # if params['volumes_from'] is not None:
+        #     self.ensure_capability('volumes_from')
+
+        extra_params = {}
+        if self.module.params.get('insecure_registry'):
+            if self.ensure_capability('insecure_registry', fail=False):  # fail=False: skip quietly if unsupported
+                extra_params['insecure_registry'] = self.module.params.get('insecure_registry')
+
+        def do_create(count, params):  # helper: create `count` containers from the shared params
+            results = []
+            for _ in range(count):
+                result = self.client.create_container(**params)
+                self.increment_counter('created')
+                results.append(result)
+
+            return results
+
+        try:
+            containers = do_create(count, params)
+        except:  # creation failed -- assume the image is missing locally and try to pull it
+            resource = self.module.params.get('image')
+            image, tag = get_split_image_tag(resource)
+            if self.module.params.get('username'):
+                try:
+                    self.client.login(
+                        self.module.params.get('username'),
+                        password=self.module.params.get('password'),
+                        email=self.module.params.get('email'),
+                        registry=self.module.params.get('registry')
+                    )
+                except:
+                    self.module.fail_json(msg="failed to login to the remote registry, check your username/password.")
+            try:
+                self.client.pull(image, tag=tag, **extra_params)
+            except:
+                self.module.fail_json(msg="failed to pull the specified image: %s" % resource)
+            self.increment_counter('pull')
+            containers = do_create(count, params)  # retry once now that the image is present
+
+        return containers
+
+    def start_containers(self, containers, wait=False):  # start created containers, optionally waiting for exit
+        params = {
+            'lxc_conf': self.lxc_conf,
+            'binds': self.binds,
+            'port_bindings': self.port_bindings,
+            'publish_all_ports': self.module.params.get('publish_all_ports'),
+            'privileged':   self.module.params.get('privileged'),
+            'links': self.links,
+            'network_mode': self.module.params.get('net'),
+        }
+
+        optionals = {}
+        for optional_param in ('dns', 'volumes_from', 'restart_policy', 'restart_policy_retry'):
+            optionals[optional_param] = self.module.params.get(optional_param)
+
+        if optionals['dns'] is not None:
+            self.ensure_capability('dns')
+            params['dns'] = optionals['dns']
+
+        if optionals['volumes_from'] is not None:
+            self.ensure_capability('volumes_from')
+            params['volumes_from'] = optionals['volumes_from']
+
+        if optionals['restart_policy'] is not None:
+            self.ensure_capability('restart_policy')
+            params['restart_policy'] = { 'Name': optionals['restart_policy'] }
+            if params['restart_policy']['Name'] == 'on-failure':  # retry count only applies to 'on-failure'
+                params['restart_policy']['MaximumRetryCount'] = optionals['restart_policy_retry']
+
+        for i in containers:
+            self.client.start(i['Id'], **params)
+            self.increment_counter('started')
+            if wait:
+                self.wait_for_container(i)  # block until exit; fails the module on non-zero rc
+
+    def stop_containers(self, containers):  # stop each container, then wait for all to exit (returns exit codes)
+        for i in containers:
+            self.client.stop(i['Id'])
+            self.increment_counter('stopped')
+
+        return [self.client.wait(i['Id']) for i in containers]
+
+    def remove_containers(self, containers):  # delete (already stopped) containers
+        for i in containers:
+            self.client.remove_container(i['Id'])
+            self.increment_counter('removed')
+
+    def kill_containers(self, containers):  # send SIGKILL via the docker API
+        for i in containers:
+            self.client.kill(i['Id'])
+            self.increment_counter('killed')
+
+    def restart_containers(self, containers, wait=False):  # restart containers, optionally waiting for exit
+        for i in containers:
+            self.client.restart(i['Id'])
+            self.increment_counter('restarted')
+            if wait:
+                self.wait_for_container(i)
+
+    def wait_for_container(self, container):  # block until the container exits; fail the module on non-zero rc
+        cid = container['Id']
+        rc = self.client.wait(cid)
+        if rc != 0:
+            # Use the container's output as the fail message
+            msg = self.client.logs(cid, stdout=True, stderr=True,
+                                   stream=False, timestamps=False)
+            self.module.fail_json(rc=rc, msg=msg)
+
+def main():
+    module = AnsibleModule(
+        argument_spec = dict(
+            count           = dict(default=1),
+            image           = dict(required=True),
+            command         = dict(required=False, default=None),
+            expose          = dict(required=False, default=None, type='list'),
+            ports           = dict(required=False, default=None, type='list'),
+            publish_all_ports = dict(default=False, type='bool'),
+            volumes         = dict(default=None, type='list'),
+            volumes_from    = dict(default=None),
+            links           = dict(default=None, type='list'),
+            memory_limit    = dict(default=0),
+            memory_swap     = dict(default=0),
+            docker_url      = dict(default='unix://var/run/docker.sock'),
+            docker_api_version = dict(default=docker.client.DEFAULT_DOCKER_API_VERSION),
+            username        = dict(default=None),
+            password        = dict(),
+            email           = dict(),
+            registry        = dict(),
+            hostname        = dict(default=None),
+            env             = dict(type='dict'),
+            dns             = dict(),
+            detach          = dict(default=True, type='bool'),
+            state           = dict(default='running', choices=['absent', 'present', 'running', 'stopped', 'killed', 'restarted']),
+            restart_policy  = dict(default=None, choices=['always', 'on-failure', 'no']),
+            restart_policy_retry = dict(default=0, type='int'),
+            debug           = dict(default=False, type='bool'),
+            privileged      = dict(default=False, type='bool'),
+            stdin_open      = dict(default=False, type='bool'),
+            tty             = dict(default=False, type='bool'),
+            lxc_conf        = dict(default=None, type='list'),
+            name            = dict(default=None),
+            net             = dict(default=None),
+            insecure_registry = dict(default=False, type='bool'),
+            wait            = dict(default=False, type='bool'),
+        )
+    )
+
+    check_dependencies(module)
+
+    try:
+        manager = DockerManager(module)
+        state = module.params.get('state')
+        count = int(module.params.get('count'))
+        name = module.params.get('name')
+        image = module.params.get('image')
+        wait = module.params.get('wait')
+
+        if count < 0:
+            module.fail_json(msg="Count must be greater than zero")
+        if count > 1 and name:
+            module.fail_json(msg="Count and name must not be used together")
+
+        running_containers = manager.get_running_containers()
+        running_count = len(running_containers)
+        delta = count - running_count
+        deployed_containers = manager.get_deployed_containers()
+        facts = None
+        failed = False
+        changed = False
+
+        # start/stop containers
+        if state in [ "running", "present" ]:
+
+            # make sure a container with `name` exists, if not create and start it
+            if name:
+                # first determine if a container with this name exists
+                existing_container = None
+                for deployed_container in deployed_containers:
+                    if deployed_container.get('Name') == '/%s' % name:
+                        existing_container = deployed_container
+                        break
+
+                # the named container is running, but with a
+                # different image or tag, so we stop it first
+                if existing_container and existing_container.get('Config', dict()).get('Image') != image:
+                    manager.stop_containers([existing_container])
+                    manager.remove_containers([existing_container])
+                    running_containers = manager.get_running_containers()
+                    deployed_containers = manager.get_deployed_containers()
+                    existing_container = None
+
+                # if the container isn't running (or if we stopped the
+                # old version above), create and (maybe) start it up now
+                if not existing_container:
+                    containers = manager.create_containers(1)
+                    if state == "present": # otherwise it get (re)started later anyways..
+                        manager.start_containers(containers)
+                        running_containers = manager.get_running_containers()
+                    deployed_containers = manager.get_deployed_containers()
+
+            if state == "running":
+                # make sure a container with `name` is running
+                if name and "/" + name not in map(lambda x: x.get('Name'), running_containers):
+                    manager.start_containers(deployed_containers, wait=wait)
+
+                # start more containers if we don't have enough
+                elif delta > 0:
+                    containers = manager.create_containers(delta)
+                    manager.start_containers(containers, wait=wait)
+
+                # stop containers if we have too many
+                elif delta < 0:
+                    containers_to_stop = running_containers[0:abs(delta)]
+                    containers = manager.stop_containers(containers_to_stop)
+                    manager.remove_containers(containers_to_stop)
+
+
+                facts = manager.get_running_containers()
+            else:
+                facts = manager.get_deployed_containers()
+
+        # stop and remove containers
+        elif state == "absent":
+            facts = manager.stop_containers(deployed_containers)
+            manager.remove_containers(deployed_containers)
+
+        # stop containers
+        elif state == "stopped":
+            facts = manager.stop_containers(running_containers)
+
+        # kill containers
+        elif state == "killed":
+            manager.kill_containers(running_containers)
+
+        # restart containers
+        elif state == "restarted":
+            manager.restart_containers(running_containers)
+            facts = manager.get_inspect_containers(running_containers)
+
+        msg = "%s container(s) running image %s with command %s" % \
+                (manager.get_summary_counters_msg(), module.params.get('image'), module.params.get('command'))
+        changed = manager.has_changed()
+
+        module.exit_json(failed=failed, changed=changed, msg=msg, ansible_facts=_ansible_facts(facts))
+
+    except DockerAPIError, e:
+        changed = manager.has_changed()
+        module.exit_json(failed=True, changed=changed, msg="Docker API error: " + e.explanation)
+
+    except RequestException, e:
+        changed = manager.has_changed()
+        module.exit_json(failed=True, changed=changed, msg=repr(e))
+
+# import module snippets
+from ansible.module_utils.basic import *  # Ansible inlines its module boilerplate via this import
+
+if __name__ == '__main__':
+    main()
diff --git a/ch13/playbooks/memcached/Dockerfile b/ch13/playbooks/memcached/Dockerfile
new file mode 100644 (file)
index 0000000..144af5d
--- /dev/null
@@ -0,0 +1,23 @@
+FROM ubuntu:trusty
+MAINTAINER lorin@ansiblebook.com
+
+# Based on
+# https://www.digitalocean.com/community/tutorials/docker-explained-how-to-create-docker-containers-running-memcached
+
+# Update the default application repository sources list
+RUN apt-get update
+
+# Install Memcached
+RUN apt-get install -y memcached
+
+# Port to expose (default: 11211)
+EXPOSE 11211
+
+# Default Memcached run command arguments
+CMD ["-m", "128"]
+
+# Set the user to run Memcached daemon
+USER daemon
+
+# Exec-form entrypoint so the CMD args above ("-m", "128") are appended to memcached
+ENTRYPOINT ["memcached"]
diff --git a/ch13/playbooks/memcached/Makefile b/ch13/playbooks/memcached/Makefile
new file mode 100644 (file)
index 0000000..becdfe9
--- /dev/null
@@ -0,0 +1,10 @@
+.PHONY: image bash
+
+IMAGE=lorin/memcached
+
+image:
+       docker build -t $(IMAGE) .
+
+bash:
+       docker run -ti $(IMAGE) /bin/bash
+
diff --git a/ch13/playbooks/mezzanine.env b/ch13/playbooks/mezzanine.env
new file mode 100644 (file)
index 0000000..dc187f6
--- /dev/null
@@ -0,0 +1,9 @@
+SECRET_KEY=randomsecretkey
+NEVERCACHE_KEY=randomnevercachekey
+ALLOWED_HOSTS=*
+DATABASE_NAME=mezzanine
+DATABASE_USER=mezzanine
+DATABASE_PASSWORD=password
+DATABASE_HOST=172.17.0.42
+DATABASE_PORT=5432
+GUNICORN_PORT=8000
diff --git a/ch13/playbooks/mezzanine/Dockerfile b/ch13/playbooks/mezzanine/Dockerfile
new file mode 100644 (file)
index 0000000..d614969
--- /dev/null
@@ -0,0 +1,14 @@
+FROM ansible/ubuntu14.04-ansible:stable
+MAINTAINER Lorin Hochstein <lorin@ansiblebook.com>
+
+ADD ansible /srv/ansible
+WORKDIR /srv/ansible
+
+RUN ansible-playbook mezzanine-container.yml -c local
+
+VOLUME /srv/project/static
+
+WORKDIR /srv/project
+
+EXPOSE 8000
+CMD ["gunicorn_django", "-c", "gunicorn.conf.py"]
diff --git a/ch13/playbooks/mezzanine/Makefile b/ch13/playbooks/mezzanine/Makefile
new file mode 100644 (file)
index 0000000..7516ba8
--- /dev/null
@@ -0,0 +1,10 @@
+.PHONY: image run bash
+
+IMAGE=lorin/mezzanine
+
+image:
+       docker build -t $(IMAGE) .
+
+bash:
+       docker run -ti $(IMAGE) /bin/bash
+
diff --git a/ch13/playbooks/mezzanine/ansible/files/gunicorn.conf.py b/ch13/playbooks/mezzanine/ansible/files/gunicorn.conf.py
new file mode 100644 (file)
index 0000000..9c17675
--- /dev/null
@@ -0,0 +1,8 @@
+from __future__ import unicode_literals
+import multiprocessing
+import os
+
+bind = "0.0.0.0:{}".format(os.environ.get("GUNICORN_PORT", 8000))  # listen on all interfaces; port from env, default 8000
+workers = multiprocessing.cpu_count() * 2 + 1  # common (2 * cores) + 1 worker sizing
+loglevel = "error"
+proc_name = "mezzanine"  # process title shown in ps/top
diff --git a/ch13/playbooks/mezzanine/ansible/files/local_settings.py b/ch13/playbooks/mezzanine/ansible/files/local_settings.py
new file mode 100644 (file)
index 0000000..c984c2a
--- /dev/null
@@ -0,0 +1,44 @@
+from __future__ import unicode_literals
+import os
+
+SECRET_KEY = os.environ.get("SECRET_KEY", "")
+NEVERCACHE_KEY = os.environ.get("NEVERCACHE_KEY", "")
+ALLOWED_HOSTS = os.environ.get("ALLOWED_HOSTS", "")
+
+DATABASES = {
+    "default": {
+        # Ends with "postgresql_psycopg2", "mysql", "sqlite3" or "oracle".
+        "ENGINE": "django.db.backends.postgresql_psycopg2",
+        # DB name or path to database file if using sqlite3.
+        "NAME": os.environ.get("DATABASE_NAME", ""),
+        # Not used with sqlite3.
+        "USER": os.environ.get("DATABASE_USER", ""),
+        # Not used with sqlite3.
+        "PASSWORD": os.environ.get("DATABASE_PASSWORD", ""),
+        # Set to empty string for localhost. Not used with sqlite3.
+        "HOST": os.environ.get("DATABASE_HOST", ""),
+        # Set to empty string for default. Not used with sqlite3.
+        "PORT": os.environ.get("DATABASE_PORT", "")
+    }
+}
+
+SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTOCOL", "https")
+
+CACHE_MIDDLEWARE_SECONDS = 60
+
+CACHE_MIDDLEWARE_KEY_PREFIX = "mezzanine"
+
+CACHES = {
+    "default": {
+        "BACKEND": "django.core.cache.backends.memcached.MemcachedCache",
+        "LOCATION": os.environ.get("MEMCACHED_LOCATION", "memcached:11211"),
+    }
+}
+
+SESSION_ENGINE = "django.contrib.sessions.backends.cache"
+
+TWITTER_ACCESS_TOKEN_KEY = os.environ.get("TWITTER_ACCESS_TOKEN_KEY ", "")
+TWITTER_ACCESS_TOKEN_SECRET = os.environ.get("TWITTER_ACCESS_TOKEN_SECRET ", "")
+TWITTER_CONSUMER_KEY = os.environ.get("TWITTER_CONSUMER_KEY ", "")
+TWITTER_CONSUMER_SERCRET = os.environ.get("TWITTER_CONSUMER_SERCRET ", "")
+TWITTER_DEFAULT_QUERY = "from:ansiblebook"
diff --git a/ch13/playbooks/mezzanine/ansible/files/scripts/setadmin.py b/ch13/playbooks/mezzanine/ansible/files/scripts/setadmin.py
new file mode 100755 (executable)
index 0000000..7800302
--- /dev/null
@@ -0,0 +1,23 @@
+#!/usr/bin/env python
+# A script to set the admin credentials
+# Assumes two environment variables
+#
+# PROJECT_DIR: the project directory (e.g., ~/projname)
+# ADMIN_PASSWORD: admin user's password
+
+import os
+import sys
+
+# Add the project directory to system path
+proj_dir = os.path.expanduser(os.environ['PROJECT_DIR'])
+sys.path.append(proj_dir)
+
+os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'  # must be set before any Django/Mezzanine import
+
+
+from mezzanine.utils.models import get_user_model
+User = get_user_model()
+u, _ = User.objects.get_or_create(username='admin')  # idempotent: reuses an existing 'admin' user
+u.is_staff = u.is_superuser = True  # grant full admin access
+u.set_password(os.environ['ADMIN_PASSWORD'])  # set_password hashes; the raw password is never stored
+u.save()
diff --git a/ch13/playbooks/mezzanine/ansible/files/scripts/setsite.py b/ch13/playbooks/mezzanine/ansible/files/scripts/setsite.py
new file mode 100755 (executable)
index 0000000..18ca33f
--- /dev/null
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+# A script to set the site domain
+# Assumes two environment variables
+#
+# PROJECT_DIR: the project directory (e.g., ~/projname)
+# WEBSITE_DOMAIN: the domain of the site (e.g., www.example.com)
+
+import os
+import sys
+
+# Add the project directory to system path
+proj_dir = os.path.expanduser(os.environ['PROJECT_DIR'])
+sys.path.append(proj_dir)
+
+os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'  # must be set before importing django.conf
+from django.conf import settings
+from django.contrib.sites.models import Site
+
+domain = os.environ['WEBSITE_DOMAIN']
+Site.objects.filter(id=settings.SITE_ID).update(domain=domain)  # point the current Site at the new domain
+Site.objects.get_or_create(domain=domain)  # ensure a Site row exists even if SITE_ID matched nothing
diff --git a/ch13/playbooks/mezzanine/ansible/mezzanine-container.yml b/ch13/playbooks/mezzanine/ansible/mezzanine-container.yml
new file mode 100644 (file)
index 0000000..9388cc1
--- /dev/null
@@ -0,0 +1,48 @@
+- name: Create Mezzanine container
+  hosts: local  # NOTE(review): relies on the base image's inventory defining 'local' -- confirm
+  vars:
+    mezzanine_repo_url: https://github.com/lorin/mezzanine-example.git
+    mezzanine_proj_path: /srv/project
+    mezzanine_reqs_path: requirements.txt
+    script_path: /srv/scripts
+  tasks:
+  - name: install apt packages
+    apt: pkg={{ item }} update_cache=yes cache_valid_time=3600
+    with_items:
+      - git
+      - gunicorn
+      - libjpeg-dev
+      - libpq-dev
+      - python-dev
+      - python-pip
+      - python-psycopg2
+      - python-setuptools
+  - name: check out the repository on the host
+    git: repo={{ mezzanine_repo_url }} dest={{ mezzanine_proj_path }} accept_hostkey=yes  # avoids an interactive ssh prompt during the image build
+  - name: install required python packages
+    pip: name={{ item }}
+    with_items:
+      - south
+      - psycopg2
+      - django-compressor
+      - python-memcached
+  - name: install requirements.txt
+    pip: requirements={{ mezzanine_proj_path }}/{{ mezzanine_reqs_path }}
+  - name: generate the settings file
+    copy: src=files/local_settings.py dest={{ mezzanine_proj_path }}/local_settings.py
+  - name: set the gunicorn config file
+    copy: src=files/gunicorn.conf.py dest={{ mezzanine_proj_path }}/gunicorn.conf.py
+  - name: collect static assets into the appropriate directory
+    django_manage: command=collectstatic app_path={{ mezzanine_proj_path }}
+    environment:
+      # We can't run collectstatic if the secret key is blank,
+      # so we just pass in an arbitrary one
+      SECRET_KEY: nonblanksecretkey
+  - name: script directory
+    file: path={{ script_path }} state=directory
+  - name: copy scripts for setting site id and admin at launch time
+    copy: src=files/scripts/{{ item }} dest={{ script_path }}/{{ item }} mode=0755
+    with_items:
+      - setadmin.py
+      - setsite.py
+
diff --git a/ch13/playbooks/nginx/Dockerfile b/ch13/playbooks/nginx/Dockerfile
new file mode 100644 (file)
index 0000000..75d5ada
--- /dev/null
@@ -0,0 +1,29 @@
+FROM debian:wheezy
+# From
+# https://github.com/nginxinc/docker-nginx/blob/57da11369acbec3256b0c2704a50282eeabb684f/Dockerfile
+
+# We repeat rather than do it from this one because of
+# https://github.com/docker/docker-py/issues/447
+
+
+RUN apt-key adv --keyserver pgp.mit.edu --recv-keys 573BFD6B3D8FBC641079A6ABABF5BD827BD9BF62
+RUN echo "deb http://nginx.org/packages/mainline/debian/ wheezy nginx" >> /etc/apt/sources.list
+
+ENV NGINX_VERSION 1.7.9-1~wheezy
+
+RUN apt-get update && apt-get install -y nginx=${NGINX_VERSION} && rm -rf /var/lib/apt/lists/*
+
+# forward request and error logs to docker log collector
+RUN ln -sf /dev/stdout /var/log/nginx/access.log
+RUN ln -sf /dev/stderr /var/log/nginx/error.log
+
+# See Docker bug referenced at the top
+# VOLUME ["/var/cache/nginx"]
+
+EXPOSE 80 443
+
+CMD ["nginx", "-g", "daemon off;"]
+
+RUN rm /etc/nginx/conf.d/default.conf \
+       /etc/nginx/conf.d/example_ssl.conf
+COPY nginx.conf /etc/nginx/conf.d/mezzanine.conf
diff --git a/ch13/playbooks/nginx/Makefile b/ch13/playbooks/nginx/Makefile
new file mode 100644 (file)
index 0000000..0088d01
--- /dev/null
@@ -0,0 +1,10 @@
+.PHONY: image run bash
+
+IMAGE=lorin/nginx-mezzanine
+
+image:
+       docker build -t $(IMAGE) .
+
+bash:
+       docker run -ti $(IMAGE) /bin/bash
+
diff --git a/ch13/playbooks/nginx/nginx.conf b/ch13/playbooks/nginx/nginx.conf
new file mode 100644 (file)
index 0000000..c58bebb
--- /dev/null
@@ -0,0 +1,47 @@
+upstream mezzanine {
+    server mezzanine:8000;  # 'mezzanine' resolves via the docker link alias (see run-mezzanine.yml)
+}
+
+server {
+
+    listen 80;
+
+    listen 443 ssl;
+
+    client_max_body_size 10M;
+    keepalive_timeout    15;
+
+    # cert files are provided by the certs container via volumes_from
+    ssl_certificate      /certs/nginx.crt;
+    ssl_certificate_key  /certs/nginx.key;
+    ssl_session_cache    shared:SSL:10m;
+    ssl_session_timeout  10m;
+    ssl_ciphers ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!3DES:!MD5:!PSK;
+    ssl_prefer_server_ciphers on;
+
+    location / {
+        proxy_redirect      off;
+        proxy_set_header    Host                    $host;
+        proxy_set_header    X-Real-IP               $remote_addr;
+        proxy_set_header    X-Forwarded-For         $proxy_add_x_forwarded_for;
+        proxy_set_header    X-Forwarded-Protocol    $scheme;  # consumed by SECURE_PROXY_SSL_HEADER in local_settings.py
+        proxy_pass          http://mezzanine;
+    }
+
+    location /static/ {
+        root            /srv/project;  # shared from the mezzanine container via volumes_from
+        access_log      off;
+        log_not_found   off;
+    }
+
+    location /robots.txt {
+        root            /srv/project/static;
+        access_log      off;
+        log_not_found   off;
+    }
+
+    location /favicon.ico {
+        root            /srv/project/static/img;
+        access_log      off;
+        log_not_found   off;
+    }
+}
diff --git a/ch13/playbooks/run-mezzanine.yml b/ch13/playbooks/run-mezzanine.yml
new file mode 100755 (executable)
index 0000000..0948164
--- /dev/null
@@ -0,0 +1,101 @@
+#!/usr/bin/env ansible-playbook
+---
+- name: run mezzanine from containers
+  hosts: localhost
+  vars_files:
+    - secrets.yml
+  vars:
+    # The postgres container uses the same name for the database
+    # and the user
+    database_name: mezzanine
+    database_user: mezzanine
+    database_port: 5432
+    gunicorn_port: 8000
+    docker_host: "{{ lookup('env', 'DOCKER_HOST') | regex_replace('^tcp://(.*):\\d+$', '\\\\1') | default('localhost', true) }}"
+    project_dir: /srv/project
+    website_domain: "{{ docker_host }}.xip.io"
+    mezzanine_env:
+      SECRET_KEY: "{{ secret_key }}"
+      NEVERCACHE_KEY: "{{ nevercache_key }}"
+      ALLOWED_HOSTS: "*"
+      DATABASE_NAME: "{{ database_name }}"
+      DATABASE_USER: "{{ database_user }}"
+      DATABASE_PASSWORD: "{{ database_password }}"
+      DATABASE_HOST: "{{ database_host }}"
+      DATABASE_PORT: "{{ database_port }}"
+      GUNICORN_PORT: "{{ gunicorn_port }}"
+    setadmin_env:
+      PROJECT_DIR: "{{ project_dir }}"
+      ADMIN_PASSWORD: "{{ admin_password }}"
+    setsite_env:
+      PROJECT_DIR: "{{ project_dir }}"
+      WEBSITE_DOMAIN: "{{ website_domain }}"
+
+  tasks:
+    - name: start the postgres container
+      docker:
+        image: postgres:9.4
+        name: postgres
+        publish_all_ports: true
+        env:
+          POSTGRES_USER: "{{ database_user }}"
+          POSTGRES_PASSWORD: "{{ database_password }}"
+    - name: capture database ip address and mapped port
+      set_fact:
+        database_host: "{{ docker_containers[0].NetworkSettings.IPAddress }}"
+        mapped_database_port: "{{ docker_containers[0].NetworkSettings.Ports['5432/tcp'][0].HostPort}}"
+    - name: wait for database to come up
+      wait_for: host={{ docker_host }} port={{ mapped_database_port }}
+    - name: initialize database
+      docker:
+        image: lorin/mezzanine:latest
+        command: python manage.py {{ item }} --noinput
+        wait: true
+        env: "{{ mezzanine_env }}"
+      with_items:
+        - syncdb
+        - migrate
+      register: django
+    - name: set the site id
+      docker:
+        image: lorin/mezzanine:latest
+        command: /srv/scripts/setsite.py
+        env: "{{ setsite_env.update(mezzanine_env) }}{{ setsite_env }}"  # merge mezzanine_env into setsite_env, then emit the merged dict
+        wait: true
+    - name: set the admin password
+      docker:
+        image: lorin/mezzanine:latest
+        command: /srv/scripts/setadmin.py
+        env: "{{ setadmin_env.update(mezzanine_env) }}{{ setadmin_env }}"  # same merge trick as above
+        wait: true
+    - name: start the memcached container
+      docker:
+        image: lorin/memcached:latest
+        name: memcached
+    - name: start the mezzanine container
+      docker:
+        image: lorin/mezzanine:latest
+        name: mezzanine
+        env: "{{ mezzanine_env }}"
+        links: memcached
+    - name: start the mezzanine cron job
+      docker:
+        image: lorin/mezzanine:latest
+        name: mezzanine_cron  # must differ from the web container's name, or this task is a no-op
+        env: "{{ mezzanine_env }}"
+        command: cron -f
+    - name: start the cert container
+      docker:
+        image: lorin/certs:latest
+        name: certs
+    - name: run nginx
+      docker:
+        image: lorin/nginx-mezzanine:latest
+        ports:
+          - "80:80"
+          - "443:443"
+        name: nginx
+        volumes_from:
+          - mezzanine
+          - certs
+        links: mezzanine
diff --git a/ch13/playbooks/secrets.yml b/ch13/playbooks/secrets.yml
new file mode 100644 (file)
index 0000000..454d4d7
--- /dev/null
@@ -0,0 +1,4 @@
+database_password: password
+secret_key: randomsecretkey
+nevercache_key: randomnevercachekey
+admin_password: password