$$distribution = '${distribution}'

$$hdfs_deployed = ${hdfs_deployed}
$$realm = '${realm}'
$$ssl = false

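# Cluster layout: a single master host (also the only frontend and ZooKeeper
# server) and a set of worker nodes, all addressed by FQDN.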
$$master = '${master_hostname}.${domain}'
$$frontends = [
  '${master_hostname}.${domain}',
]
$$nodes = suffix(${nodes}, '.${domain}')
$$zookeepers = [
  $$master,
]

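# Component versions differ per distribution; the matching Hive metastore
# schema and the database backend (MySQL vs. MariaDB, by OS release) are
# selected below.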
if $$distribution == 'bigtop' {
  $$version = '1.5.0' # supported: 1.4.0, 1.5.0
  $$hadoop_version = 2
  $$oozie_version = 4
} elsif $$distribution == 'cloudera' {
  $$version = '6.3.0' # supported: 6.3.0, 6.3.2
  $$hadoop_version = 3
  $$oozie_version = 5
}
$$hive_schema_file = "$${distribution}-$${version}" ? {
  'bigtop-1.4.0'   => 'hive-schema-2.3.0.mysql.sql',
  'bigtop-1.5.0'   => 'hive-schema-2.3.0.mysql.sql',
  'cloudera-6.3.0' => 'hive-schema-2.1.1.mysql.sql',
  'cloudera-6.3.2' => 'hive-schema-2.1.1.mysql.sql',
  default          => undef, # stringify_facts=false required
}
$$db_type = "$${operatingsystem}-$${operatingsystemmajrelease}" ? {
  'Debian-9' => 'mysql',
  default    => 'mariadb',
}

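# Kerberos service principals to create: host/, HTTP/, and hbase/ for every
# host, dn/ and nm/ for the worker nodes, and the remaining service principals
# (hive, httpfs, jhs, nfs, nn, oozie, rm, spark, zookeeper) for the master only.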
$$principals = suffix(concat(
  prefix(concat([$$master], $$nodes), 'host/'),
  prefix(concat([$$master], $$nodes), 'HTTP/'),
  ["httpfs/$$master"],
  prefix(concat([$$master], $$nodes), 'hbase/'),
  ["hive/$$master"],
  prefix($$nodes, 'dn/'),
  ["jhs/$$master"],
  ["nfs/$$master"],
  prefix($$nodes, 'nm/'),
  ["nn/$$master"],
  ["oozie/$$master"],
  ["rm/$$master"],
  ["spark/$$master"],
  ["zookeeper/$$master"]
), "@$${realm}")

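# Run the Kerberos setup in a separate stage so the principals and keytabs
# exist before the main stage configures the Hadoop services.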
stage { 'kerberos':
  before => Stage['main'],
}

class{"kerberos":
  kadmin_hostname    => $$master,
  admin_principal    => "puppet/admin@$${realm}",
  admin_password     => '$kerberos_admin_password',
  master_password    => '$kerberos_master_password',
  realm              => $$realm,
  default_attributes => {
    'requires_preauth' => true,
  },
  default_policy     => 'default_host',
  stage              => 'kerberos',
}

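# Core Hadoop: HDFS namenode, YARN resourcemanager, MapReduce history server,
# and the HttpFS gateway all run on the master; the worker nodes are the
# HDFS/YARN slaves.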
class{'hadoop':
  acl                    => true,
  hdfs_hostname          => $$master,
  yarn_hostname          => $$master,
  historyserver_hostname => $$master,
  httpfs_hostnames       => [
    $$master,
  ],
  frontends              => $$frontends,
  oozie_hostnames        => [
    $$master,
  ],
  slaves                 => $$nodes,
  zookeeper_hostnames    => $$zookeepers,
  hdfs_name_dirs         => [
    '/data',
  ],
  hdfs_data_dirs         => [
    '/data',
  ],
  cluster_name           => '${domain}',
  https                  => $$ssl,
  realm                  => $$realm,
  features               => {
    'yellowmanager' => true,
    'aggregation'   => true,
  },
  properties             => {
    'dfs.replication'                => 2,
    'hadoop.proxyuser.hive.groups'   => 'hive,impala,oozie,users',
    #'hadoop.proxyuser.hive.groups'  => '*',
    'hadoop.proxyuser.hive.hosts'    => '*',
  },
  version                => $$hadoop_version,
  hdfs_deployed          => $$hdfs_deployed,
}

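# HBase: master colocated with the Hadoop master, region servers on the worker
# nodes; the web UIs are disabled by setting the info ports to -1.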
class{'hbase':
  acl                 => true,
  frontends           => $$frontends,
  hdfs_hostname       => $$master,
  master_hostname     => $$master,
  slaves              => $$nodes,
  zookeeper_hostnames => $$zookeepers,
  features            => {
    'hbmanager' => true,
  },
  properties          => {
    'hbase.master.info.port'       => -1,
    'hbase.regionserver.info.port' => -1,
  },
  realm               => $$realm,
}

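# Hive metastore and HiveServer2 on the master, backed by the MySQL/MariaDB
# database and the schema file selected above.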
class{'hive':
  hdfs_hostname       => $$master,
  metastore_hostname  => $$master,
  server2_hostname    => $$master,
  zookeeper_hostnames => $$zookeepers,
  realm               => $$realm,
  features            => {
    'manager' => true,
  },
  db                  => $$db_type,
  db_password         => 'good-password',
  schema_file         => $$hive_schema_file,
}

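# Oozie is currently disabled (see oozie_enable => false in site_hadoop below).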
#class { 'oozie':
#  acl            => true,
#  db             => $$db_type,
#  db_password    => 'good-password',
#  oozie_hostname => $$master,
#  oozie_sharelib => '/usr/lib/oozie/oozie-sharelib.tar.gz',
#  realm          => $$realm,
#  version        => $$oozie_version,
#}

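# Spark on YARN with its history server on the master; the native Hadoop
# libraries are added to the library path of Spark processes.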
class { 'spark':
  historyserver_hostname => $$master,
  environment            => {
    'LD_LIBRARY_PATH'     => '/usr/lib/hadoop/lib/native:$${LD_LIBRARY_PATH}',
    'SPARK_YARN_USER_ENV' => 'LD_LIBRARY_PATH=$${LD_LIBRARY_PATH},$${SPARK_YARN_USER_ENV}',
  },
  #jar_enable             => true,
  realm                  => $$realm,
}

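# Single-node ZooKeeper ensemble on the master.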
class { '::zookeeper':
  hostnames => $$zookeepers,
  realm     => $$realm,
}

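# Site-wide settings: the distribution and version in use, and which optional
# services are enabled on this cluster.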
class{'site_hadoop':
  distribution        => $$distribution,
  version             => $$version,
  accounting_enable   => false,
  hbase_enable        => true,
  nfs_frontend_enable => false,
  oozie_enable        => false,
  pig_enable          => false,
  spark_enable        => true,
}

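# Local account for the image user, with a home directory and membership in
# the 'users' group.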
group{$image_user:
  ensure => 'present',
}
-> user{$image_user:
  gid        => $image_user,
  groups     => ['users'],
  managehome => true,
  shell      => '/bin/bash',
}

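# Common Kerberos setup for all cluster hosts: the keytab directory must exist
# before any keytab is placed in it.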
class local_kerberos {
  file{'/etc/security/keytab':
    ensure => 'directory',
    owner  => 'root',
    group  => 'root',
    mode   => '0755',
  }

  File['/etc/security/keytab'] -> Kerberos::Keytab <| |>
}

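# Kerberos resources for the master (which is also the Kerberos admin server):
# password policies, all service principals, and the keytabs of the services
# running locally.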
class local_kerberos_master {
  include local_kerberos

  kerberos::policy{'default':
    ensure    => 'present',
    minlength => 6,
    history   => 2,
  }

  kerberos::policy{'default_host':
    ensure    => 'present',
    minlength => 6,
  }

  kerberos::principal{$$::kerberos::admin_principal:
    ensure   => 'present',
    password => $$::kerberos::admin_password,
  }

  kerberos::principal{$$principals:}

  kerberos::keytab{'/etc/krb5.keytab':
    principals => ["host/$${::fqdn}@$${realm}"],
  }
  kerberos::keytab{'/etc/security/keytab/hive.service.keytab':
    principals => ["hive/$${::fqdn}@$${realm}"],
  }
  kerberos::keytab{'/etc/security/keytab/hbase.service.keytab':
    principals => ["hbase/$${::fqdn}@$${realm}"],
  }
  kerberos::keytab{'/etc/security/keytab/http.service.keytab':
    principals => ["HTTP/$${::fqdn}@$${realm}"],
  }
  kerberos::keytab{'/etc/security/keytab/httpfs.service.keytab':
    principals => ["httpfs/$${::fqdn}@$${realm}"],
  }
  # works only locally on the Kerberos admin server!
  kerberos::keytab{'/etc/security/keytab/httpfs-http.service.keytab':
    principals => [
      "httpfs/$${::fqdn}@$${realm}",
      "HTTP/$${::fqdn}@$${realm}",
    ],
  }
  kerberos::keytab{'/etc/security/keytab/jhs.service.keytab':
    principals => ["jhs/$${::fqdn}@$${realm}"],
  }
  kerberos::keytab{'/etc/security/keytab/nfs.service.keytab':
    principals => ["nfs/$${::fqdn}@$${realm}"],
  }
  kerberos::keytab{'/etc/security/keytab/nn.service.keytab':
    principals => ["nn/$${::fqdn}@$${realm}"],
  }
  kerberos::keytab{'/etc/security/keytab/oozie.service.keytab':
    principals => ["oozie/$${::fqdn}@$${realm}"],
  }
  kerberos::keytab{'/etc/security/keytab/rm.service.keytab':
    principals => ["rm/$${::fqdn}@$${realm}"],
  }
  kerberos::keytab{'/etc/security/keytab/spark.service.keytab':
    principals => ["spark/$${::fqdn}@$${realm}"],
  }
  kerberos::keytab{'/etc/security/keytab/zookeeper.service.keytab':
    principals => ["zookeeper/$${::fqdn}@$${realm}"],
  }
}

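# Kerberos resources for the worker nodes: only the keytabs needed by the
# locally running daemons (host, datanode, regionserver, HTTP, nodemanager).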
class local_kerberos_node {
  include local_kerberos

  # the keytabs below are obtained using the kerberos::admin_principal and
  # kerberos::admin_password parameters
  kerberos::keytab{'/etc/krb5.keytab':
    principals => ["host/$${::fqdn}@$${realm}"],
    wait       => 600,
  }
  kerberos::keytab{'/etc/security/keytab/dn.service.keytab':
    principals => ["dn/$${::fqdn}@$${realm}"],
    wait       => 600,
  }
  kerberos::keytab{'/etc/security/keytab/hbase.service.keytab':
    principals => ["hbase/$${::fqdn}@$${realm}"],
    wait       => 600,
  }
  kerberos::keytab{'/etc/security/keytab/http.service.keytab':
    principals => ["HTTP/$${::fqdn}@$${realm}"],
    wait       => 600,
  }
  kerberos::keytab{'/etc/security/keytab/nm.service.keytab':
    principals => ["nm/$${::fqdn}@$${realm}"],
    wait       => 600,
  }
}

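# Master node: HDFS and YARN master roles, cluster frontend, HttpFS, and the
# MySQL/MariaDB server backing the Hive metastore.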
node /${master_hostname}\..*/ {
  include ::site_hadoop::role::master_hdfs
  include ::site_hadoop::role::master_yarn
  include ::site_hadoop::role::frontend
  include ::hadoop::httpfs
  class { 'mysql::bindings':
    java_enable       => true,
    java_package_name => "lib$${db_type}-java",
  }
  class { 'mysql::server':
    root_password => 'root',
  }
  #include ::oozie::client

  class{'local_kerberos_master':
    stage => 'kerberos',
  }
}

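# Worker nodes: HDFS/YARN slave role and the keytabs for their local daemons.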
node /${node_hostname}\d*\..*/ {
  include ::site_hadoop::role::slave

  class{'local_kerberos_node':
    stage => 'kerberos',
  }
}