#!/bin/bash

CONFIG_FILE="$1"


# This script creates and starts a new cluster, or restarts an existing one.

# Starts an OpenShift cluster using Docker containers, provisioning a registry, router, initial templates, and a default project.

# Data and config are preserved between restarts via the --use-existing-config flag and the --host-*-dir arguments.

# A public hostname is specified for the server with the --public-hostname argument.

# A custom routing suffix is specified using the --routing-suffix argument,
# which allows dynamic host names to be created for routes.

# This script also adds one user to the cluster-admin role.

# You can copy or edit the config.sh configuration file; this way you can install more than one cluster. However, only one cluster can run at a time.

# You can run 2.install.certificate.sh to install your server's certificate and private key into the cluster.
# Then the web console will be accessible at https://your-domain:8443/console .
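
# For reference, a minimal config.sh might define the variables this script
# expects (names taken from their use below; actual parsing is done by
# 0.read.config.sh, so treat these values as illustrative):
#
#   ORIGIN_HOME="/var/lib/origin"           # host dir for etcd/config/volumes
#   PUBLIC_HOSTNAME="master.example.com"    # public hostname of the server
#   ROUTING_SUFFIX="apps.example.com"       # suffix for generated route hosts
#   VERSION="v3.11"                         # openshift/origin image tag
#   ADMIN_USERNAME="admin"                  # user granted cluster-admin below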


_oc_cluster_up() {

    oc cluster up \
        --use-existing-config \
        --host-data-dir="${ORIGIN_HOME}/openshift.local.etcd" \
        --host-config-dir="${ORIGIN_HOME}/openshift.local.config" \
        --host-volumes-dir="${ORIGIN_HOME}/openshift.local.volumes" \
        --public-hostname="${PUBLIC_HOSTNAME}" \
        --routing-suffix="${ROUTING_SUFFIX}" \
        --version="${VERSION}" \
        --metrics
}
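
# After the cluster is up, you can check or stop it with the standard client
# commands, for example (data persists thanks to the --host-*-dir flags):
#
#   oc cluster status
#   oc cluster down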


main() {

    if [ -z "${CONFIG_FILE}" ]; then
        echo "You have to pass configuration file."
        echo "Usage:     install.cluster.sh <path-to-config-file>"
        echo "Example: ./install.cluster.sh ./config.sh"
        exit 1
    fi

    source "0.read.config.sh"
    read_config

    _oc_cluster_up

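    # Log in as system:admin, the certificate-backed superuser that
    # 'oc cluster up' creates, so the role grant below succeeds.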
    oc login -u system:admin

    echo "Adding user ${ADMIN_USERNAME} as cluster-admin."
    oadm policy add-cluster-role-to-user cluster-admin "${ADMIN_USERNAME}"
    echo "open https://${PUBLIC_HOSTNAME}:8443/console"
    echo "User:     ${ADMIN_USERNAME}"
    echo "Password: ${ADMIN_USERNAME}"

}

main