HDFS table engine:
hadoop fs -mkdir /clickhouse
hadoop fs -chown -R clickhouse:clickhouse /clickhouse
create table hdfs_table1(
id UInt32,
code String,
name String
) ENGINE = HDFS('hdfs://hdp1.nauu.com:8020/clickhouse/hdfs_table1','CSV');
Note: dropping the table does not remove the data files on HDFS.
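A minimal write/read round trip, as a sketch (the sample rows are hypothetical; assumes ClickHouse can write to the HDFS path above):
insert into hdfs_table1 values (1,'c1','n1'),(2,'c2','n2');
select * from hdfs_table1;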
MySQL table engine:
ENGINE = MySQL('host:port','database','table','user','password')
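A minimal sketch of a MySQL-backed table (host, database, and credentials below are hypothetical):
create table mysql_table1(
id UInt32,
name String
) ENGINE = MySQL('mysql1.nauu.com:3306','test','mysql_table1','root','123456');
SELECT and INSERT are pushed through to the remote MySQL table; UPDATE and DELETE are not supported.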
JDBC table engine:
https://github.com/ClickHouse/clickhouse-jdbc-bridge
ENGINE = JDBC('jdbc:url','database','table');
<jdbc_bridge>
<host>ch5.nauu.com</host>
<port>9019</port>
</jdbc_bridge>
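With the bridge above running, a sketch of a JDBC-backed table (the PostgreSQL datasource URI, schema, and table are hypothetical):
create table jdbc_table1(
id UInt32,
name String
) ENGINE = JDBC('jdbc:postgresql://ch6.nauu.com:5432/test?user=dba&password=dba','public','jdbc_table1');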
Kafka table engine:
ENGINE = Kafka()
SETTINGS
kafka_broker_list = 'host1:port1,host2:port2,...',
kafka_topic_list = 'topic1,topic2,...',
kafka_group_name = 'group_name',
kafka_format = 'data_format',
[kafka_row_delimiter = 'delimiter_symbol']
[kafka_schema = '']
[kafka_num_consumers = N]
[kafka_skip_broken_messages = N]
[kafka_commit_every_batch = N]
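A sketch of the usual consumption pattern: the Kafka table only streams messages, so a materialized view is attached to persist them into a MergeTree table (broker, topic, and all names below are hypothetical):
create table kafka_queue(
id UInt32,
code String
) ENGINE = Kafka()
SETTINGS
kafka_broker_list = 'kp1.nauu.com:9092',
kafka_topic_list = 'topic_test',
kafka_group_name = 'group_test',
kafka_format = 'JSONEachRow';
create table kafka_store(
id UInt32,
code String
) ENGINE = MergeTree ORDER BY id;
create materialized view kafka_consumer to kafka_store as select id, code from kafka_queue;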
File table engine:
ENGINE = File(format)
create table file_table(
name String,
value UInt32
) ENGINE = File(CSV);
The CSV data file is kept under the /opt/clickhouse/data/data/default/file_table directory.
Memory table engine:
create table memory_1(
id UInt64
) ENGINE = Memory();
Set table engine: (cannot be queried directly with SELECT; it can only be used indirectly, as the right-hand side of an IN condition)
create table set_1
(
id UInt8
) ENGINE = Set();
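The set needs data before the IN query below can match anything; a minimal insert:
insert into set_1 values (1),(2),(3);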
select arrayJoin([1,2,3]) as a where a in set_1;
Join table engine:
ENGINE = Join(join_strictness, join_type, key1[, key2, ...])
join_strictness: join strictness; currently ALL, ANY, and ASOF are supported
join_type: join type, e.g. LEFT or INNER
join_key: the join key, which determines the column(s) used for the join
create table id_join_tb1(
id UInt8,
price UInt32,
time DateTime
) ENGINE = Join(ANY,LEFT,id);
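A usage sketch with hypothetical rows; unlike Set, a Join table can be queried directly, and single values can be looked up with joinGet:
insert into id_join_tb1 values (1,100,'2021-01-01 00:00:00'),(2,200,'2021-01-01 00:00:00');
select id,price from id_join_tb1;
select joinGet('id_join_tb1','price',toUInt8(1)); -- returns 100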
Buffer table engine:
ENGINE = Buffer(database,table,num_layers,min_time,max_time,min_rows,max_rows,min_bytes,max_bytes)
database: target database
table: name of the target table
num_layers: number of buffer layers (threads) that flush data to the target table in parallel; a flush only happens once the threshold conditions are met
min_time and max_time: lower and upper bounds of the time condition, in seconds
min_rows and max_rows: lower and upper bounds of the row-count condition
min_bytes and max_bytes: lower and upper bounds of the data-volume condition, in bytes
A layer is flushed when all of the min conditions are met, or when any single max condition is met.
create table buffer_to_memory_1 as memory_1 engine = Buffer(default,memory_1,16,10,100,10000,1000000,1000000,10000000);
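With the definition above, rows inserted into buffer_to_memory_1 sit in memory until a flush fires; 1000 rows is below min_rows (10000), so this flush waits for max_time (100 seconds):
insert into buffer_to_memory_1 select number from numbers(1000);
select count(*) from memory_1; -- 0 until the flush happens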
TinyLog table engine:
create table tinylog_1(
id UInt64,
code UInt64
) ENGINE = TinyLog();
StripeLog table engine:
create table stripelog_1(
id UInt64,
price Float32
) ENGINE = StripeLog();
Log table engine:
create table log_1(
id UInt64,
code UInt64
) ENGINE = Log();
Merge table engine:
ENGINE = Merge(database, tables_regexp)
The Merge table itself stores no data; at query time it reads from every table in the database whose name matches the regular expression.
create table merge_tb1(
id String,
create_time Datetime,
code String
) ENGINE = Merge(default,'test');
Dictionary table engine:
ENGINE = Dictionary(dict_name)
create table tb_test_flat_dict(
id UInt64,
code String,
name String
) ENGINE = Dictionary(test_flat_dict);
Distributed table engine:
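This engine is a proxy over local tables on the shards of a cluster; a minimal sketch (the cluster sharding_simple and the per-shard table test_local are hypothetical and assumed to exist):
ENGINE = Distributed(cluster, database, table[, sharding_key])
create table test_all as test_local ENGINE = Distributed(sharding_simple, default, test_local, rand());
Reads fan out to test_local on every shard; writes are routed to one shard by the sharding key.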
Live View table engine: works like an event listener; it takes the result of a SQL query as the target it monitors.
select name,value from system.settings where name like '%live_view%';
set allow_experimental_live_view = 1 enables the feature (it is still experimental)
create table origin_table2(id UInt64) ENGINE = MergeTree ORDER BY id;
create live view lv_origin as select count(*) from origin_table2;
watch lv_origin (the session enters watch mode)
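To see the live view react, insert into the source table from another session; the WATCH query then pushes the updated count:
insert into origin_table2 values (1),(2),(3);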
Null table engine: much like /dev/null; data written to it is discarded and reads return nothing, yet materialized views attached to it still receive the inserted rows.
create table null_table1( id UInt8) ENGINE = Null;
create materialized view view_null_table ENGINE = TinyLog as select * from null_table1; -- the view persists rows passing through the Null table
URL table engine: equivalent in effect to an HTTP client
ENGINE = URL('url',format)
create table url_table(name String) ENGINE = URL('http://client1.nauu.com:3000/users',JSONEachRow);
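With this definition, a SELECT issues an HTTP GET against the URL and an INSERT issues a POST (the sample row is hypothetical):
select * from url_table;
insert into url_table values ('nauu');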