Thank you all for this great info, we will include it in the docs, so other users can benefit from it.
Make sure that if you use VNC over SSL (wss) together with Strict-Transport-Security (which you should!) in your nginx SSL config, you also need to include your domain in the Content-Security-Policy connect-src list, or else your browser will not allow the VNC window to open. This took me a while to figure out.
# Extra security headers for the Sunstone vhost.
add_header X-Frame-Options SAMEORIGIN;
add_header X-Content-Type-Options nosniff;
add_header X-XSS-Protection "1; mode=block";
# HSTS: force HTTPS for one year.
add_header Strict-Transport-Security max-age=31536000;
# The wss:// origin of the noVNC proxy must be whitelisted in connect-src,
# otherwise the browser blocks the VNC websocket under CSP.
# Fix: the original line was missing the closing quote and semicolon,
# which is a syntax error nginx refuses to load.
add_header Content-Security-Policy "default-src 'self'; connect-src 'self' ws://localhost wss://www.example.com:29876";
Hey Martijn,
we did not configure our nginx with these headers, and yet everything works
perfectly. I am not sure why you needed to add the domain — are you
accessing Sunstone via various domains?
Cheers,
Nico
No, but it adds extra security to your SSL setup; we like the A+ at ssllabs https://www.ssllabs.com/ssltest/analyze.html?d=ui.hpccloud.surfsara.nl&latest
Hi,
If I proxy the cluster IP from the NAT to the public network, I get the error “VNC noVNC ready: native WebSockets, canvas rendering”.
For example.
Cloud-1 - 192.168.2.2
Cloud-2 - 192.168.2.3
Cloud-3 - 192.168.2.4
Cluster IP under Corosync - 192.168.2.5
And I proxy it with nginx to public 111.111.111.111 with domain mydomain.com
# Redirect all plain-HTTP requests to HTTPS.
server {
    listen 80;
    server_name mydomain.com;
    # "return 301" is the idiomatic form of the old rewrite-based redirect.
    return 301 https://mydomain.com$request_uri;
}
# HTTPS vhost proxying Sunstone, which runs behind the Corosync cluster IP.
server {
    # "ssl on;" is deprecated; enable TLS on the listen directive instead.
    listen 443 ssl;
    server_name mydomain.com;
    ssl_certificate /etc/letsencrypt/live/mydomain.com/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/mydomain.com/privkey.pem;
    location / {
        proxy_pass http://192.168.2.5:9869/;
        proxy_redirect off;
        log_not_found off;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header Host $http_host;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    }
    # noVNC connects to /websockify on the VNC proxy port (29876).
    # Without this websocket-aware location the client stalls with
    # "VNC noVNC ready: native WebSockets, canvas rendering".
    location /websockify {
        proxy_pass http://192.168.2.5:29876;
        # Websocket handshake requires HTTP/1.1 plus the Upgrade and
        # Connection headers to be forwarded to the backend.
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
    }
}
I tried with wss, but it doesn't work.
Roman opennebula@discoursemail.com writes:
Hi,
If I proxy cluster IP from NAT to the public get the error “VNC noVNC ready: native WebSockets, canvas rendering”
[…]
I tried with wss, but it`s doesnt work.
Hello,
We opened a feature request some time ago[1] for this.
Regards.
Footnotes:
[1] Backlog #3538: Enable path based NoVNC - OpenNebula - OpenNebula Development pages
I have working VNC + Nginx + SSL without any problems.
# Main nginx configuration posted as a known-working Sunstone + SSL setup.
pid /run/nginx.pid;
events {
worker_connections 1024;
}
http {
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
# Large limit so VM image / ISO uploads through Sunstone succeed.
client_max_body_size 5000m;
include /etc/nginx/mime.types;
default_type application/octet-stream;
# Load modular configuration files from the /etc/nginx/conf.d directory.
# See http://nginx.org/en/docs/ngx_core_module.html#include
# for more information.
include /etc/nginx/conf.d/*.conf;
# (the rest of the http block was elided in the original post)
....
# Redirect every plain-HTTP request to HTTPS.
server {
listen 80 default_server;
server_name _;
return 301 https://$host$request_uri;
}
server {
listen 443;
server_name _;
# NOTE(review): "ssl on;" is deprecated in modern nginx — prefer
# "listen 443 ssl;" — kept as posted since the author reports it works.
ssl on;
ssl_certificate /etc/ssl/certs/cert.crt;
ssl_certificate_key /etc/ssl/certs/cert.key;
# Load configuration files for the default server block.
include /etc/nginx/default.d/*.conf;
# Sunstone listens locally on port 9869.
location / {
proxy_pass http://127.0.0.1:9869;
}
error_page 404 /404.html;
location = /40x.html {
}
error_page 500 502 503 504 /50x.html;
location = /50x.html {
}
location /nginx_status {
# Turn on nginx stats
stub_status on;
# I do not need logs for stats
access_log off;
# Security: Only allow access from localhost IP #
allow 127.0.0.1;
# Send rest of the world to /dev/null #
deny all;
}
}
}
# Sunstone noVNC proxy settings (sunstone-server.conf).
:vnc_proxy_port: 29876
# "only" forces encrypted websocket (wss) connections.
:vnc_proxy_support_wss: only
# Certificate/key pair should match the one nginx serves so the browser
# trusts the wss:// endpoint — presumably the same cert; verify on deploy.
:vnc_proxy_cert: /etc/ssl/certs/cert.crt
:vnc_proxy_key: /etc/ssl/certs/cert.key
:vnc_proxy_ipv6: false
:vnc_request_password: true
Hi,
Did you proxy Sunstone via the ClusterIP?
I have 3 nodes with configuration as you wrote. (192.168.2.1
, 192.168.2.2
, 192.168.2.3
)
Corosync unites this servers with ClusterIP 192.168.2.5
I proxy the ClusterIP 192.168.2.5
with NGINX+SSL to the public network with the domain myopennebula.example
NGINX.CONF
# Backends: the Sunstone UI and the noVNC websocket proxy, both reached
# through the Corosync-managed cluster IP so they follow a failover.
upstream sunstone {
    server 192.168.2.5:9869;
}
upstream websocketproxy {
    server 192.168.2.5:29876;
}
# Redirect plain HTTP to HTTPS.
server {
    listen *:80;
    server_name myopennebula.example;
    # "return 301" is the idiomatic form of the old rewrite-based redirect.
    return 301 https://myopennebula.example$request_uri;
}
server {
    # "ssl on;" is deprecated; TLS is enabled on the listen directive,
    # and the TLS vhost now names its server explicitly.
    listen 443 ssl;
    server_name myopennebula.example;
    ssl_certificate /etc/letsencrypt/live/myopennebula.example/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/myopennebula.example/privkey.pem;
    access_log /var/log/nginx/opennebula-sunstone-access.log;
    error_log /var/log/nginx/opennebula-sunstone-error.log;
    # To upload ISO files, increase for VMs images
    client_max_body_size 1G;
    location / {
        proxy_pass http://sunstone;
        proxy_redirect off;
        log_not_found off;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header Host $http_host;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    }
    # noVNC websocket endpoint; HTTP/1.1 plus the Upgrade/Connection
    # headers are required for the websocket handshake to pass through.
    location /websockify {
        proxy_pass http://websocketproxy;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
    }
}
COROSYNC.CONF
# Please read the corosync.conf.5 manual page
totem {
    version: 2
    # Cluster traffic is unencrypted — acceptable only on a trusted
    # management LAN.
    crypto_cipher: none
    crypto_hash: none
    interface {
        ringnumber: 0
        bindnetaddr: 192.168.2.0
        mcastport: 5405
        ttl: 1
    }
    # Unicast UDP transport (no multicast required on the network).
    transport: udpu
}
logging {
    fileline: off
    to_stderr: no
    to_logfile: yes
    to_syslog: yes
    logfile: /var/log/cluster/corosync.log
    debug: off
    timestamp: on
    logger_subsys {
        subsys: QUORUM
        debug: off
    }
}
quorum {
    provider: corosync_votequorum
}
service {
    # ver: 1 means pacemaker is started separately, not by corosync.
    name: pacemaker
    ver: 1
}
# Fix: the nodelist block was missing its closing brace, which corosync
# rejects as a parse error.
nodelist {
    node {
        ring0_addr: CloudKVM-1
        nodeid: 1
    }
    node {
        ring0_addr: CloudKVM-2
        nodeid: 2
    }
    node {
        ring0_addr: CloudKVM-3
        nodeid: 3
    }
}
COROSYNC SHOW CONFIGURATION
# Pacemaker CIB as shown by "crm configure show".
node 1: CloudKVM-1
node 2: CloudKVM-2
node 3: CloudKVM-3
# Floating cluster IP (192.168.2.5) that nginx proxies to.
primitive ClusterIP IPaddr2 \
params ip=192.168.2.5 cidr_netmask=23 \
op monitor interval=5s
primitive opennebula-novnc_p systemd:opennebula-novnc \
op monitor interval=10s timeout=100s \
op start interval=0 timeout=100s \
op stop interval=0 timeout=100s
primitive opennebula-sunstone_p systemd:opennebula-sunstone \
op monitor interval=10s timeout=100s \
op start interval=0 timeout=100s \
op stop interval=0 timeout=100s
primitive opennebula_p systemd:opennebula \
op monitor interval=10s timeout=100s \
op start interval=0 timeout=100s \
op stop interval=0 timeout=100s
# All OpenNebula services are grouped with the IP so they move together.
group Opennebula_HA ClusterIP opennebula_p opennebula-sunstone_p opennebula-novnc_p
location cli-prefer-Opennebula_HA Opennebula_HA role=Started inf: CloudKVM-3
# NOTE(review): stonith-enabled=false and no-quorum-policy=ignore are
# acceptable for a lab setup but risky in production — confirm intent.
property cib-bootstrap-options: \
have-watchdog=false \
dc-version=1.1.15-11.el7_3.2-e174ec8 \
cluster-infrastructure=corosync \
stonith-enabled=false \
no-quorum-policy=ignore \
last-lrm-refresh=1486635972