Skip to content

Commit

Permalink
Merge pull request #24 from phnmnl/feature/postgres-pvc
Browse files Browse the repository at this point in the history
Feature/postgres pvc
  • Loading branch information
andersla authored Jun 20, 2018
2 parents ff4f1dc + 2a88cd6 commit 975c10d
Show file tree
Hide file tree
Showing 15 changed files with 140 additions and 525 deletions.
5 changes: 5 additions & 0 deletions playbooks/create-postgres-sc.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
---
# Play: create the gluster-backed StorageClass used by the postgres PVC.
# Runs only on the master node; delegates all work to the
# postgres-gluster-sc role.
- hosts: master
  # Facts are not needed for these tasks; skipping speeds up the play.
  # (Real boolean, not the string "False" — yamllint truthy rule.)
  gather_facts: false
  roles:
    - role: postgres-gluster-sc
5 changes: 3 additions & 2 deletions playbooks/install-phenomenal-playbook.yml
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
---
- name: Installs phenomenal services
hosts: master
gather_facts: "yes"
gather_facts: "no"
vars:
# Services
jupyter_include: "true"
Expand All @@ -15,6 +15,7 @@
dashboard_hostname: "dashboard"
# Storage
pvc_name: "galaxy-pvc"
postgres_pvc_name: "postgres-pvc"
# Domain
cloudflare_proxied: "false"
proxied_suffix: ""
Expand All @@ -37,7 +38,7 @@
galaxy_admin_email: "your @ email.com"
galaxy_api_key: "{{galaxy_generated_key}}"
galaxy_pvc: "{{pvc_name}}"
postgres_pvc: "{{pvc_name}}"
postgres_pvc: "{{postgres_pvc_name}}"
nologging: "{{no_sensitive_logging}}"
when:
galaxy_include | bool == true
Expand Down
45 changes: 45 additions & 0 deletions playbooks/roles/postgres-gluster-sc/tasks/main.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,45 @@
---
# Role tasks: build a postgres-optimized GlusterFS StorageClass.
#   1. Discover the glusterfs pod IPs and the heketi REST endpoint.
#   2. Derive a replication factor (volumetype) from the gluster pod count.
#   3. Render and apply the StorageClass manifest.

- name: retrieve gluster-pod IPs
  command: >
    kubectl get pods
    --namespace=storage-heketi
    -o jsonpath='{.items[?(@.spec.containers[*].name=="glusterfs")].status.podIP}'
  register: get_ips

# Turn the space-separated IP list into a JSON-style list string,
# e.g. '10.0.0.1 10.0.0.2' -> ["10.0.0.1","10.0.0.2"]
- name: set fact endpoint_list
  set_fact:
    endpoint_list: "[\"{{ get_ips.stdout | replace(' ','\",\"') }}\"]"

- name: retrieve heketi endpoint
  command: >
    kubectl get endpoints
    --namespace=storage-heketi
    -o jsonpath='{.items[?(@.metadata.name=="heketi")].subsets[*].addresses[*].ip}:{.items[?(@.metadata.name=="heketi")].subsets[*].ports[*].port}'
  register: get_heketi_endpoint

- name: set fact heketi_endpoint
  set_fact:
    heketi_endpoint: "{{ get_heketi_endpoint.stdout }}"

# NOTE(review): this relies on Ansible coercing the JSON-style string above
# into a list so that `length` counts pods rather than characters — verify
# on the Ansible version in use.
- name: set fact glusterpod_count
  set_fact:
    glusterpod_count: "{{ (endpoint_list | length) | int }}"

# volumetype (replication factor): <VolumeDurability>:number,
# e.g. none:1, replicate:2, replicate:3.
# In Jinja2, '-' inside a tag strips the whitespace before that tag.
- name: set fact volumetype
  set_fact:
    volumetype: "{% if (glusterpod_count | int) == 1 %}none:1
      {%- elif (glusterpod_count | int) == 2 %}replicate:2
      {%- else %}replicate:3{% endif %}"
  # Allow callers to pre-set volumetype and skip the derivation.
  when: volumetype is undefined

- name: render storage-class-pg-optimized
  template:
    src: storage-class-pg-optimized.yml
    dest: "~/.kubernetes-yaml/heketi/storage-class-pg-optimized.yml"

- name: create storage-class-pg-optimized
  command: >
    kubectl apply -f
    $HOME/.kubernetes-yaml/heketi/storage-class-pg-optimized.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
---
# StorageClass tuned for PostgreSQL on GlusterFS.
# This file is an Ansible template; {{ heketi_endpoint }} and
# {{ volumetype }} are substituted at render time.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: sc-pg-optimized
  # NOTE(review): StorageClass is cluster-scoped, so this namespace field
  # is ignored by Kubernetes — confirm and consider removing it.
  namespace: storage-heketi
  annotations:
    storageclass.kubernetes.io/is-default-class: "false"
provisioner: kubernetes.io/glusterfs
parameters:
  resturl: "http://{{ heketi_endpoint }}"
  # Quoted so rendered values such as replicate:3 stay plain strings.
  volumetype: "{{ volumetype }}"
  # Gluster volume options for database-style workloads; folded (>-) into
  # a single space-joined line, which matches how the original plain
  # multi-line scalar was parsed.
  volumeoptions: >-
    performance.quick-read off, performance.read-ahead off,
    performance.io-cache off, performance.stat-prefetch off,
    performance.low-prio-threads 32, network.remote-dio enable,
    cluster.eager-lock enable, disperse.eager-lock enable,
    cluster.quorum-type auto, cluster.server-quorum-type server,
    cluster.data-self-heal-algorithm full, cluster.locking-scheme granular,
    cluster.shd-wait-qlength 10000, features.shard on, user.cifs off
mountOptions:
  - entry-timeout=0
  - attribute-timeout=0
107 changes: 0 additions & 107 deletions templates-phenomenal/config.tfvars.aws-template

This file was deleted.

103 changes: 0 additions & 103 deletions templates-phenomenal/config.tfvars.azure-template

This file was deleted.

Loading

0 comments on commit 975c10d

Please sign in to comment.