hive> select to_date('2011-12-08 10:03:01') from tableName;
2011-12-08
6. Date-to-year function: year
Syntax: year(string date) Return type: int Description: returns the year part of the date.
hive> select year('2011-12-08 10:03:01') from tableName;
2011
hive> select year('2012-12-08') from tableName;
2012
7. Date-to-month function: month
Syntax: month(string date) Return type: int Description: returns the month part of the date.
hive> select month('2011-12-08 10:03:01') from tableName;
12
hive> select month('2011-08-08') from tableName;
8
8. Date-to-day function: day
Syntax: day(string date) Return type: int Description: returns the day-of-month part of the date.
hive> select day('2011-12-08 10:03:01') from tableName;
8
hive> select day('2011-12-24') from tableName;
24
9. Date-to-hour function: hour
Syntax: hour(string date) Return type: int Description: returns the hour part of the date.
hive> select hour('2011-12-08 10:03:01') from tableName;
10
10. Date-to-minute function: minute
Syntax: minute(string date) Return type: int Description: returns the minute part of the date.
hive> select minute('2011-12-08 10:03:01') from tableName;
3
11. Date-to-second function: second
Syntax: second(string date) Return type: int Description: returns the second part of the date.
hive> select second('2011-12-08 10:03:01') from tableName;
1
12. Date-to-week function: weekofyear
Syntax: weekofyear(string date) Return type: int Description: returns the week number within the year for the date.
hive> select weekofyear('2011-12-08 10:03:01') from tableName;
49
13. Date difference function: datediff
Syntax: datediff(string enddate, string startdate) Return type: int Description: returns the number of days from startdate to enddate.
hive> select datediff('2012-12-08','2012-05-09') from tableName;
213
14. Date addition function: date_add
Syntax: date_add(string startdate, int days) Return type: string Description: returns the date that is days days after startdate.
hive> select date_add('2012-12-08',10) from tableName;
2012-12-18
15. Date subtraction function: date_sub
Syntax: date_sub(string startdate, int days) Return type: string Description: returns the date that is days days before startdate.
hive> select date_sub('2012-12-08',10) from tableName;
2012-11-28
1.3 Conditional Functions
1. if function: if
Syntax: if(boolean testCondition, T valueTrue, T valueFalseOrNull) Return type: T Description: returns valueTrue when testCondition is TRUE; otherwise returns valueFalseOrNull.
hive> select if(1=2,100,200) from tableName;
200
hive> select if(1=1,100,200) from tableName;
100
2. Non-null lookup function: COALESCE
Syntax: COALESCE(T v1, T v2, …) Return type: T Description: returns the first non-NULL argument; returns NULL if all arguments are NULL.
hive> select COALESCE(null,'100','50') from tableName;
100
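When every argument is NULL, the result is NULL, for example:
hive> select COALESCE(null,null) from tableName;
NULL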
3. Conditional function: CASE
Syntax: CASE a WHEN b THEN c [WHEN d THEN e]* [ELSE f] END Return type: T Description: returns c if a equals b; returns e if a equals d; otherwise returns f.
hive> select case 100 when 50 then 'tom' when 100 then 'mary' else 'tim' end from tableName;
mary
hive> select case 200 when 50 then 'tom' when 100 then 'mary' else 'tim' end from tableName;
tim
4. Conditional function: CASE
Syntax: CASE WHEN a THEN b [WHEN c THEN d]* [ELSE e] END Return type: T Description: returns b if a is TRUE; returns d if c is TRUE; otherwise returns e.
hive> select case when 1=2 then 'tom' when 2=2 then 'mary' else 'tim' end from tableName;
mary
hive> select case when 1=1 then 'tom' when 2=2 then 'mary' else 'tim' end from tableName;
tom
1.4 String Functions
1. String length function: length
Syntax: length(string A) Return type: int Description: returns the length of string A.
hive> select length('abcedfg') from tableName;
7
2. String reverse function: reverse
Syntax: reverse(string A) Return type: string Description: returns string A reversed.
hive> select reverse('abcedfg') from tableName;
gfdecba
3. String concatenation function: concat
Syntax: concat(string A, string B…) Return type: string Description: returns the concatenation of the input strings; any number of inputs is supported.
hive> select concat('abc','def','gh') from tableName;
abcdefgh
4. String concatenation with a separator: concat_ws
Syntax: concat_ws(string SEP, string A, string B…) Return type: string Description: returns the concatenation of the input strings, separated by SEP.
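A minimal example in the same style as the other entries (tableName is the same placeholder table used throughout):
hive> select concat_ws(',','abc','def','gh') from tableName;
abc,def,gh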
5. Substring function: substr, substring
Syntax: substr(string A, int start), substring(string A, int start) Return type: string Description: returns the substring of A from position start through the end of the string.
hive> select substr('abcde',3) from tableName;
cde
hive> select substring('abcde',3) from tableName;
cde
hive> select substr('abcde',-1) from tableName; (same as Oracle)
e
6. Substring function: substr, substring
Syntax: substr(string A, int start, int len), substring(string A, int start, int len) Return type: string Description: returns the substring of A of length len starting at position start.
hive> select substr('abcde',3,2) from tableName;
cd
hive> select substring('abcde',3,2) from tableName;
cd
hive> select substring('abcde',-2,2) from tableName;
de
7. Uppercase functions: upper, ucase
Syntax: upper(string A), ucase(string A) Return type: string Description: returns string A in upper case.
hive> select upper('abSEd') from tableName;
ABSED
hive> select ucase('abSEd') from tableName;
ABSED
8. Lowercase functions: lower, lcase
Syntax: lower(string A), lcase(string A) Return type: string Description: returns string A in lower case.
hive> select lower('abSEd') from tableName;
absed
hive> select lcase('abSEd') from tableName;
absed
Create a map table
create table score_map(name string, score map<string,int>)
row format delimited fields terminated by '\t'
collection items terminated by ',' map keys terminated by ':';
Create the data file with the following content and load it
cd /kkb/install/hivedatas/
vim score_map.txt
zhangsan 数学:80,语文:89,英语:95
lisi 语文:60,数学:80,英语:99
Load the data into the Hive table
load data local inpath '/kkb/install/hivedatas/score_map.txt' overwrite into table score_map;
Accessing the map data:
Get all the values:
select name,map_values(score) from score_map;
Get all the keys:
select name,map_keys(score) from score_map;
Get a value by key:
select name,score["数学"] from score_map;
Get the number of map elements:
select name,size(score) from score_map;
Create a struct table
hive> create table movie_score( name string, info struct<number:int,score:float> ) row format delimited fields terminated by "\t" collection items terminated by ":";
Prepare the data
cd /kkb/install/hivedatas/
vim struct.txt
ABC 1254:7.4
DEF 256:4.9
XYZ 456:5.4
Load the data
load data local inpath '/kkb/install/hivedatas/struct.txt' overwrite into table movie_score;
Query the data in Hive
hive> select * from movie_score;
hive> select info.number,info.score from movie_score;
OK
1254 7.4
256 4.9
456 5.4
3. Building an array: array
Syntax: array(val1, val2, …) Description: builds an array from the input values.
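A quick look at the constructor itself before the table example below:
hive> select array(1,2,3) from tableName;
[1,2,3]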
hive> create table person(name string,work_locations array<string>)
ROW FORMAT DELIMITED
FIELDS TERMINATED BY '\t'
COLLECTION ITEMS TERMINATED BY ',';
Load data into the person table
cd /kkb/install/hivedatas/
vim person.txt
The data content is as follows
biansutao beijing,shanghai,tianjin,hangzhou
linan changchu,chengdu,wuhan
Load the data
hive > load data local inpath '/kkb/install/hivedatas/person.txt' overwrite into table person;
Query all the data
hive > select * from person;
Query by array index
hive > select work_locations[0] from person;
Query the whole array column
hive > select work_locations from person;
Query the number of elements
hive > select size(work_locations) from person;
1.7 Length Functions for Complex Types
1. Map length function: size(Map<K,V>)
Syntax: size(Map<K,V>) Return type: int Description: returns the number of entries in the map.
hive> select size(t) from map_table2;
2
2. Array length function: size(Array<T>)
Syntax: size(Array<T>) Return type: int Description: returns the number of elements in the array.
hive> select size(t) from arr_table2;
4
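These functions also work on literals built with the map and array constructors, e.g.:
hive> select size(map('k1',1,'k2',2)) from tableName;
2
hive> select size(array(1,2,3,4)) from tableName;
4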
3. Type conversion function: cast
Syntax: cast(expr as <type>) Return type: the target type Description: returns expr converted to the specified type.
hive> select cast('1' as bigint) from tableName;
1
1. Use explode to split map and array fields in a Hive table
Step 1: Create a Hive database
hive (default)> create database hive_explode;
hive (default)> use hive_explode;
Step 2: Create a Hive table, then use explode to split the map and array
create table hive_explode.t3(name string,
children array<string>,
address map<string,string>)
row format delimited fields terminated by '\t'
collection items terminated by ','
map keys terminated by ':'
stored as textFile;
Step 3: Load the data
Run the following commands on node03 to create the data file
cd /kkb/install/hivedatas/
vim maparray
The data content is as follows
zhangsan child1,child2,child3,child4 k1:v1,k2:v2
lisi child5,child6,child7,child8 k3:v3,k4:v4
Load the data into the Hive table
hive (hive_explode)> load data local inpath '/kkb/install/hivedatas/maparray' into table hive_explode.t3;
Step 4: Use explode to split the data
Split the array:
hive (hive_explode)> SELECT explode(children) AS myChild FROM hive_explode.t3;
Split the map:
hive (hive_explode)> SELECT explode(address) AS (myMapKey, myMapValue) FROM hive_explode.t3;
2. Use explode to split a JSON string
Step 1: Create a Hive table
hive (hive_explode)> create table hive_explode.explode_lateral_view (
area string,
goods_id string,
sale_info string)
ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
STORED AS textfile;
Step 2: Prepare and load the data
Prepare the data as follows
cd /kkb/install/hivedatas
vim explode_json
a:shandong,b:beijing,c:hebei|1,2,3,4,5,6,7,8,9|[{"source":"7fresh","monthSales":4900,"userCount":1900,"score":"9.9"},{"source":"jd","monthSales":2090,"userCount":78981,"score":"9.8"},{"source":"jdmart","monthSales":6987,"userCount":1600,"score":"9.0"}]
Load the data into the Hive table
hive (hive_explode)> load data local inpath '/kkb/install/hivedatas/explode_json' overwrite into table hive_explode.explode_lateral_view;
Step 3: Use explode to split the array
hive (hive_explode)> select explode(split(goods_id,',')) as goods_id from hive_explode.explode_lateral_view;
Step 4: Use explode to split the map
hive (hive_explode)> select explode(split(area,',')) as area from hive_explode.explode_lateral_view;
Step 5: Split the JSON field
hive (hive_explode)> select explode(split(regexp_replace(regexp_replace(sale_info,'\\[\\{',''),'}]',''),'},\\{')) as sale_info from hive_explode.explode_lateral_view;
Next we try get_json_object to extract the values whose key is monthSales:
hive (hive_explode)> select get_json_object(explode(split(regexp_replace(regexp_replace(sale_info,'\\[\\{',''),'}]',''),'},\\{')),'$.monthSales') as sale_info from hive_explode.explode_lateral_view;
This fails with: FAILED: SemanticException [Error 10081]: UDTF's are not supported outside the SELECT clause, nor nested in expressions
The UDTF explode cannot be nested inside another function.
Likewise, if you try to query two fields, e.g. select explode(split(area,',')) as area,good_id from explode_lateral_view;
it fails with FAILED: SemanticException 1:40 Only a single expression in the SELECT clause is supported with UDTF's. Error encountered near token 'good_id'
A UDTF supports only a single expression in the SELECT clause; this is where LATERAL VIEW comes in.
3. Using LATERAL VIEW
Query multiple fields with lateral view:
hive (hive_explode)> select goods_id2,sale_info from explode_lateral_view LATERAL VIEW explode(split(goods_id,',')) goods as goods_id2;
hive (hive_explode)> select goods_id2,sale_info,area2 from explode_lateral_view LATERAL VIEW explode(split(goods_id,',')) goods as goods_id2 LATERAL VIEW explode(split(area,',')) area as area2;
The result is effectively the Cartesian product of the three "tables" (the base table and the two lateral views).
Finally, the following statement converts each row of this JSON data into a fully flattened two-dimensional table:
hive (hive_explode)> select get_json_object(concat('{',sale_info_1,'}'),'$.source') as source, get_json_object(concat('{',sale_info_1,'}'),'$.monthSales') as monthSales, get_json_object(concat('{',sale_info_1,'}'),'$.userCount') as userCount, get_json_object(concat('{',sale_info_1,'}'),'$.score') as score from explode_lateral_view LATERAL VIEW explode(split(regexp_replace(regexp_replace(sale_info,'\\[\\{',''),'}]',''),'},\\{')) sale_info as sale_info_1;
1.9.1 Rows to columns
hive (hive_explode)> create table person_info( name string, constellation string, blood_type string) row format delimited fields terminated by "\t";
Load the data
hive (hive_explode)> load data local inpath '/kkb/install/hivedatas/constellation.txt' into table person_info;
6. Query the data as required
hive (hive_explode)> select t1.base, concat_ws('|', collect_set(t1.name)) name from (select name, concat(constellation, "," , blood_type) base from person_info) t1 group by t1.base;
1.9.2 Columns to rows
1. Function description
EXPLODE(col): splits a complex array or map column of a Hive table into multiple rows.
LATERAL VIEW
Usage: LATERAL VIEW udtf(expression) tableAlias AS columnAlias
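A minimal sketch, reusing the person table created earlier (the table and column aliases are arbitrary):
hive> select name, location from person lateral view explode(work_locations) loc_table as location;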
NTILE
Description: splits the ordered rows of each group into n buckets and returns the bucket number of the current row; if the rows do not divide evenly, the earlier buckets each receive one extra row. NTILE does not support ROWS BETWEEN, e.g. NTILE(2) OVER(PARTITION BY cookieid ORDER BY createtime ROWS BETWEEN 3 PRECEDING AND CURRENT ROW) is not allowed.
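For instance, a sketch against the cookie_pv table created below, splitting each cookie's rows into three buckets by date:
SELECT cookieid, createtime, pv, NTILE(3) OVER(PARTITION BY cookieid ORDER BY createtime) AS bucket FROM cookie_pv;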
Step 1: Create the table
CREATE EXTERNAL TABLE cookie_pv (
cookieid string,
createtime string,
pv INT
) ROW FORMAT DELIMITED
FIELDS TERMINATED BY ',' ;
Step 2: Prepare and load the data
Run the following commands on node03 to create the data file and load it into the Hive table
cd /kkb/install/hivedatas
vim cookiepv.txt
cookie1,2015-04-10,1
cookie1,2015-04-11,5
cookie1,2015-04-12,7
cookie1,2015-04-13,3
cookie1,2015-04-14,2
cookie1,2015-04-15,4
cookie1,2015-04-16,4
cookie2,2015-04-10,2
cookie2,2015-04-11,3
cookie2,2015-04-12,5
cookie2,2015-04-13,6
cookie2,2015-04-14,3
cookie2,2015-04-15,9
cookie2,2015-04-16,7
Load the data into the Hive table
load data local inpath '/kkb/install/hivedatas/cookiepv.txt' overwrite into table cookie_pv;
Step 3: Use analytic functions to get the top three PV records for each cookie
SELECT * FROM (
  SELECT
    cookieid,
    createtime,
    pv,
    RANK() OVER(PARTITION BY cookieid ORDER BY pv DESC) AS rn1,
    DENSE_RANK() OVER(PARTITION BY cookieid ORDER BY pv DESC) AS rn2,
    ROW_NUMBER() OVER(PARTITION BY cookieid ORDER BY pv DESC) AS rn3
  FROM cookie_pv
) t
WHERE rn1 <= 3;
Note that the ranking aliases must be filtered in an outer query, since Hive's WHERE clause cannot reference window-function aliases at the same level. RANK leaves gaps in the numbering after ties, DENSE_RANK does not, and ROW_NUMBER assigns a unique consecutive number regardless of ties.
Custom UDF example: convert a string to upper case.
import org.apache.hadoop.hive.ql.exec.UDF;
import org.apache.hadoop.io.Text;

public class MyUDF extends UDF {
    public Text evaluate(final Text s) {
        if (null == s) {
            return null;
        }
        // return the input string in upper case
        return new Text(s.toString().toUpperCase());
    }
}
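To use the UDF, package the compiled class into a jar and register it as a temporary function. A sketch, where the jar path and the function name to_upper are assumptions for illustration (the class above declares no package, so its fully qualified name is simply MyUDF):
-- the jar path and function name below are assumed for illustration
hive> add jar /kkb/install/hivedatas/udf.jar;
hive> create temporary function to_upper as 'MyUDF';
hive> select to_upper('abc') from tableName;
ABC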