您的位置:首页 > Web前端 > HTML5

CDH权限测试示例

2016-10-17 14:54 239 查看

1.  准备测试数据

cat /tmp/events.csv
10.1.2.3,US,android,createNote
10.200.88.99,FR,windows,updateNote
10.1.2.3,US,android,updateNote
10.200.88.77,FR,ios,createNote
10.1.4.5,US,windows,updateTag

2.  创建用户

2.1.  创建系统用户

在安装sentry server的节点创建系统用户并设置密码

useradd user1
passwd user1
useradd user2
passwd user2
useradd user3
passwd user3

2.2.  创建kerberos用户

kadmin.local -q "addprinc user1"
kadmin.local -q "addprinc user2"
kadmin.local -q "addprinc user3"

3.  创建数据库和表

3.1.  创建数据库

admin为sentry的超级管理员,该用户配置权限时已设置

kinit admin

通过beeline连接 hiveserver2,运行下面命令创建hive库的超级管理员角色, 并将该角色赋予admin组,使admin有操作hive库的权力

beeline -u "jdbc:hive2://vmw208:10000/;principal=hive/vmw208@HADOOP.COM"
create role admin_role;
GRANT ALL ON SERVER server1 TO ROLE admin_role;
GRANT ROLE admin_role TO GROUP admin;

创建两个测试数据库

create database db1;
create database db2;

3.2.  创建表

在两个测试数据库中各创建一张测试表,并导入测试数据

create table db1.table1 (
    ip STRING, country STRING, client STRING, action STRING
  ) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',';
 
 create table db2.table1 (
    ip STRING, country STRING, client STRING, action STRING
  ) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',';
 create table db2.table2 (
    ip STRING, country STRING, client STRING, action STRING
  ) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',';


 load data local inpath '/home/iie/events.csv' overwrite into table db1.table1;
 load data local inpath '/home/iie/events.csv' overwrite into table db2.table1;
 load data local inpath '/home/iie/events.csv' overwrite into table db2.table2;

4.  赋予用户权限

4.1.  给user1赋予db1的所有权限

create role user1_role;
GRANT ALL ON DATABASE db1 TO ROLE user1_role;
GRANT ROLE user1_role TO GROUP user1;

4.2.  给user2赋予db2的所有权限

create role user2_role;
GRANT ALL ON DATABASE db2 TO ROLE user2_role;
GRANT ROLE user2_role TO GROUP user2;

4.3.  给user3赋予db2.table1的所有权限

create role user3_role;
use db2;
GRANT select ON table table1 TO ROLE user3_role;
GRANT ROLE user3_role TO GROUP user3;
 

 

5.  测试用户权限

5.1.  Hive测试

5.1.1.  admin用户拥有整个hive库的权限

kinit admin
beeline -u "jdbc:hive2://vmw208:10000/;principal=hive/vmw208@HADOOP.COM"
show databases;

5.1.2.  user1用户只具有db1和default的权限

kinit user1
beeline -u "jdbc:hive2://vmw208:10000/;principal=hive/vmw208@HADOOP.COM"
0: jdbc:hive2://vmw208:10000/> show databases;
+----------------+--+
| database_name  |
+----------------+--+
| db1            |
| default        |
+----------------+--+

5.1.3.  user2用户只具有db2和default的权限

kinit user2
beeline -u "jdbc:hive2://vmw208:10000/;principal=hive/vmw208@HADOOP.COM"
0: jdbc:hive2://vmw208:10000/> show databases;
+----------------+--+
| database_name  |
+----------------+--+
| db2            |
| default        |
+----------------+--+

5.1.4.  user3用户只具有db2.table1和default的权限

kinit user2
beeline -u "jdbc:hive2://vmw208:10000/;principal=hive/vmw208@HADOOP.COM"
0: jdbc:hive2://vmw208:10000/> show databases;
+----------------+--+
| database_name  |
+----------------+--+
| db2            |
| default        |
+----------------+--+
0: jdbc:hive2://node17:10000/> use db2;
0: jdbc:hive2://node17:10000/> show tables;
INFO  : OK
+-----------+--+
| tab_name  |
+-----------+--+
| table1    |
+-----------+--+
 

 

5.2.  Hdfs测试

配置hdfs acl与sentry同步后,hdfs权限与sentry监控的目录(/user/hive/warehouse)的权限同步

5.2.1.  切换到hive用户,查看hive库文件的权限

设置hdfs acl与sentry同步后,sentry监控的hive库的权限改动会同步到对应的hdfs文件权限

[root@vmw208 home]# kinit hive
[root@vmw208 home]# hdfs dfs -getfacl -R /user/hive/warehouse/
# file: /user/hive/warehouse
# owner: hive
# group: hive
user::rwx
user:hive:rwx
group::---
group:hive:rwx
mask::rwx
other::--x
 
# file: /user/hive/warehouse/db1.db
# owner: hive
# group: hive
user::rwx
user:hive:rwx
group:user1:rwx
group::---
group:hive:rwx
mask::rwx
other::--x
 
# file: /user/hive/warehouse/db1.db/table1
# owner: hive
# group: hive
user::rwx
user:hive:rwx
group:user1:rwx
group::---
group:hive:rwx
mask::rwx
other::--x
 
# file: /user/hive/warehouse/db1.db/table1/events.csv
# owner: hive
# group: hive
user::rwx
user:hive:rwx
group:user1:rwx
group::---
group:hive:rwx
mask::rwx
other::--x
 
# file: /user/hive/warehouse/db2.db
# owner: hive
# group: hive
user::rwx
user:hive:rwx
group:user2:rwx
group::---
group:hive:rwx
mask::rwx
other::--x
 
# file: /user/hive/warehouse/db2.db/table1
# owner: hive
# group: hive
user::rwx
user:hive:rwx
group:user2:rwx
group::---
group:hive:rwx
mask::rwx
other::--x
 
# file: /user/hive/warehouse/db2.db/table1/events.csv
# owner: hive
# group: hive
user::rwx
user:hive:rwx
group:user2:rwx
group::---
group:hive:rwx
mask::rwx
other::--x                              

 

5.2.2.  切换到user1用户,查看hdfs文件

[root@vmw208 home]# kinit user1
Password for user1@HADOOP.COM:

[root@vmw208 home]# hdfs dfs -ls /user/hive/warehouse/db2.db
ls: Permission denied: user=user1, access=READ_EXECUTE, inode="/user/hive/warehouse/db2.db":hive:hive:drwxrwx--x
[root@vmw208 home]# hdfs dfs -cat /user/hive/warehouse/db2.db/table1/events.csv
cat: Permission denied: user=user1, access=READ,inode="/user/hive/warehouse/db2.db/table1/events.csv":hive:hive:-rwxrwx--x
 
[root@vmw208 home]# hdfs dfs -ls /user/hive/warehouse/db1.db
Found 1 items
drwxrwx--x+  - hive hive          0 2016-09-29 16:54
/user/hive/warehouse/db1.db/table1
[root@vmw208 home]# hdfs dfs -cat /user/hive/warehouse/db1.db/table1/events.csv
10.1.2.3,US,android,createNote
10.200.88.99,FR,windows,updateNote
10.1.2.3,US,android,updateNote
10.200.88.77,FR,ios,createNote
10.1.4.5,US,windows,updateTag

5.2.3.  切换到user2用户,查看hdfs文件

[root@vmw208 home]# kinit user2
Password for user2@HADOOP.COM:

[root@vmw208 home]# hdfs dfs -cat /user/hive/warehouse/db1.db/table1/events.csv
cat: Permission denied: user=user2, access=READ,inode="/user/hive/warehouse/db1.db/table1/events.csv":hive:hive:-rwxrwx--x
[root@vmw208 home]# hdfs dfs -cat /user/hive/warehouse/db2.db/table1/events.csv
10.1.2.3,US,android,createNote
10.200.88.99,FR,windows,updateNote
10.1.2.3,US,android,updateNote
10.200.88.77,FR,ios,createNote
10.1.4.5,US,windows,updateTag    

5.3.  Spark测试

5.3.1.   Spark读hive表数据并打印到控制台

(1)  切换到user1用户测试

[root@vmw209 xdf]# kinit user1
Password for user1@HADOOP.COM:

[root@vmw209 xdf]# spark-submit --class iie.hadoop.permission.QueryTable --master local /home/xdf/spark.jar
db2 table1
……
Exception in thread "main" org.apache.hadoop.security.AccessControlException:
Permission denied: user=user1, access=READ_EXECUTE, inode="/user/hive/warehouse/db2.db/table1":hive:hive:drwxrwx--x
[root@vmw209 xdf]# spark-submit --class iie.hadoop.permission.QueryTable --master local /home/xdf/spark.jar
db1 table1
……
+------------+-------+-------+----------+
|          ip|country| client|    action|
+------------+-------+-------+----------+
|    10.1.2.3|     US|android|createNote|
|10.200.88.99|    FR|windows|updateNote|
|    10.1.2.3|     US|android|updateNote|
|10.200.88.77|     FR|    ios|createNote|
|    10.1.4.5|     US|windows| updateTag|
+------------+-------+-------+----------+
 

(2)  切换到user2用户测试

 

[root@vmw209 xdf]# kinit user2
Password for user2@HADOOP.COM:

[root@vmw209 xdf]# spark-submit --class iie.hadoop.permission.QueryTable --master local /home/xdf/spark.jar
db1 table1
……
Exception in thread "main" org.apache.hadoop.security.AccessControlException:
Permission denied: user=user2, access=READ_EXECUTE, inode="/user/hive/warehouse/db1.db/table1":hive:hive:drwxrwx--x
[root@vmw209 xdf]# spark-submit --class iie.hadoop.permission.QueryTable --master local /home/xdf/spark.jar
db2 table1
……
+------------+-------+-------+----------+
|          ip|country| client|    action|
+------------+-------+-------+----------+
|    10.1.2.3|     US|android|createNote|
|10.200.88.99|    FR|windows|updateNote|
|    10.1.2.3|     US|android|updateNote|
|10.200.88.77|     FR|    ios|createNote|
|    10.1.4.5|     US|windows| updateTag|
+------------+-------+-------+----------+
 

5.3.2.  Spark读文件数据写入hive表中

调用工具程序spark.jar读本地文件/home/xdf/events.csv数据写到db2.table2

切换到user2用户测试

kinit user2
beeline -u "jdbc:hive2://vmw208:10000/;principal=hive/vmw208@HADOOP.COM"
use db2;
create table table2 (
    ip STRING, country STRING,client STRING, action STRING
  ) ROW FORMAT DELIMITED FIELDSTERMINATED BY ',';
[root@vmw209 xdf]# spark-submit --class iie.hadoop.permission.HCatWriterTest --master local /home/xdf/spark.jar /home/xdf/events.csv db2 table2
成功!
写到db1.table1报错,没有权限!
Exception in thread "main" org.apache.hive.hcatalog.common.HCatException : 2004 : HCatOutputFormat not initialized, setOutput has to be called. Cause : org.apache.hadoop.security.AccessControlException:
Permission denied: user=user2, access=WRITE, inode="/user/hive/warehouse/db1.db/table1":hive:hive:drwxrwx--x

 
上面只是测试环境,因为kinit + 密码的方式有时效限制,不适合在生产环境运行,幸好spark提供了相关的参数:

spark-submit
……
--principal    # 用户对应的kerberos principle
--keytab       # 对应用户principle生成的密钥文件

spark的权限管理通过对hdfs/hive的文件目录设置权限来管理,不同的用户拥有不同的权限,用户在提交spark任务时,指定对应用户的kerberos principle和keytab来实现权限管理。任务提交命令如下:

spark-submit --class iie.hadoop.permission.QueryTable --master local --principal=user1@HADOOP.COM --keytab=/home/user1/user1.keytab /home/user1/spark.jar db1 table1

其中--principal 和--keytab与用户一一对应

5.4.  Kafka测试

5.4.1.  认证

用户kafka为kafka权限控制的超级管理员

[root@node10 iie]#kinit -kt /home/iie/kafka.keytab kafka

5.4.2.  创建topic

创建topic1和topic2

[root@node10 iie]# kafka-topics --zookeeper node11:2181/kafka --create --topic topic1 --partitions 2 --replication-factor 1
[root@node10 iie]# kafka-topics --zookeeper node11:2181/kafka --create --topic topic2 --partitions 2 --replication-factor 1

5.4.3.  赋权

给user1用户赋予topic1的读写权限

[root@node10 iie]# kafka-acls --authorizer-properties zookeeper.connect=node11:2181/kafka --add --allow-principal User:user1 --allow-host node10 --producer --topic topic1 --group console-consumer-9175
[root@node10 iie]# kafka-acls --authorizer-properties zookeeper.connect=node11:2181/kafka --add --allow-principal User:user1 --allow-host node10 --consumer --topic topic1 --group console-consumer-9175

给user2用户赋予topic2的读写权限

[root@node10 iie]# kafka-acls --authorizer-properties zookeeper.connect=node11:2181/kafka --add --allow-principal User:user2 --allow-host node10 --producer --topic topic2 --group console-consumer-9175
[root@node10 iie]# kafka-acls --authorizer-properties zookeeper.connect=node11:2181/kafka --add --allow-principal User:user2 --allow-host node10 --consumer --topic topic2 --group console-consumer-9175

5.4.4.  查看权限

[root@node10 iie]# kafka-acls --authorizer-properties zookeeper.connect=node11:2181/kafka --list
Current ACLs for resource `Topic:topic1`:

       User:user1 has Allow permission for operations:
Write from hosts: node10
       User:user1 has Allow permission for operations:
Read from hosts: node10
Current ACLs for resource `Topic:topic2`:

       User:user2 has Allow permission for operations: Read
from hosts: node10
       User:user2 has Allow permission for operations: Write from hosts: node10

5.4.5.  创建生产消费配置文件

创建consumer.properties

cat /etc/kafka/conf/consumer.properties

security.protocol=SASL_PLAINTEXT
sasl.mechanism=GSSAPI
sasl.kerberos.service.name=kafka
group.id=console-consumer-9175

创建producer.properties

cat /etc/kafka/conf/producer.properties

security.protocol=SASL_PLAINTEXT
sasl.mechanism=GSSAPI
sasl.kerberos.service.name=kafka

5.4.6.  生产数据

命令行生产数据

[root@node10 iie]#kinit user1
[root@node10 iie]# kafka-console-producer --broker-list node12:9092 --topic topic1 --producer.config /etc/kafka/conf/producer.properties
123123
123123

5.4.7.  消费数据

命令行消费数据

[root@node10 iie]#kinit user1
[root@node10 iie]# kafka-console-consumer --bootstrap-server node12:9092 --topic topic1 --new-consumer --from-beginning --consumer.config /etc/kafka/conf/consumer.properties
123123
123123
 

用户对topic没有权限时报错

[root@node10 iie]# kinit user2
Password for user2@HADOOP.COM:

[root@node10 iie]# kafka-console-consumer --bootstrap-server node12:9092 --topic topic1 --new-consumer --from-beginning --consumer.config /etc/kafka/conf/consumer.properties
[2016-10-12 15:38:01,599] ERROR Unknown error when running consumer:  (kafka.tools.ConsoleConsumer$)
org.apache.kafka.common.errors.TopicAuthorizationException:
Not authorized to access topics: [topic1]

5.4.8.  移除权限

登陆管理员用户移除权限

[root@node10 iie]#kinit -kt /home/iie/kafka.keytab kafka

                                
删除user1对topic1的消费权限

[root@node10 iie]# kafka-acls --authorizer-properties zookeeper.connect=node11:2181/kafka --remove --allow-principal User:user1 --allow-host node10 --consumer --topic topic1 --group console-consumer-92175
Are you sure you want to remove ACLs:

       User:user1 has Allow permission for operations: Read from hosts: node10
       User:user1 has Allow permission for operations: Describe from hosts: node10

 from resource `Topic:topic1`? (y/n)
y
Are you sure you want to remove ACLs:

       User:user1 has Allow permission for operations: Read from hosts: node10

 from resource `Group:console-consumer-92175`?(y/n)
y
Current ACLs for resource `Topic:topic1`:

       User:user1 has Allow permission for operations: Read from hosts: node10
       User:user1 has Allow permission for operations: Describe from hosts: node10
       User:user1 has Allow permission for operations: Write from hosts: node10

 
Current ACLs for resource `Group:console-consumer-92175`:

 

测试user1消费topic1报错,说明权限已经移除

[root@node10 iie]# kinit user1
Password for user1@HADOOP.COM:

[root@node10 iie]# kafka-console-consumer --bootstrap-server node12:9092 --topic topic1 --new-consumer --from-beginning --consumer.config /etc/kafka/conf/consumer.properties
[2016-10-12 15:45:11,572] WARN The configuration sasl.mechanism = GSSAPI was supplied but isn't a known config. (org.apache.kafka.clients.consumer.ConsumerConfig)
[2016-10-12 15:45:11,914] WARN Not authorized to read from topic topic1. (org.apache.kafka.clients.consumer.internals.Fetcher)
[2016-10-12 15:45:11,916] ERROR Error processing message, terminating consumer process: (kafka.tools.ConsoleConsumer$)
org.apache.kafka.common.errors.TopicAuthorizationException: Not authorized to access topics: [topic1]
[2016-10-12 15:45:11,920] WARN Not authorized to read from topic topic1. (org.apache.kafka.clients.consumer.internals.Fetcher)
[2016-10-12 15:45:11,921] ERROR Not authorized to commit to topics [topic1] for group console-consumer-9175 (org.apache.kafka.clients.consumer.internals.ConsumerCoordinator)
[2016-10-12 15:45:11,922] WARN Auto offset commit failed for group console-consumer-9175: Not authorized to access topics: [topic1] (org.apache.kafka.clients.consumer.internals.ConsumerCoordinator)
[2016-10-12 15:45:11,927] WARN TGT renewal thread has been interrupted and will exit. (org.apache.kafka.common.security.kerberos.Login)
Processed a total of 0 messages

 

 
内容来自用户分享和网络整理,不保证内容的准确性,如有侵权内容,可联系管理员处理 点击这里给我发消息