
刚学习java web开发,写了一个简单的程序,主要是用了spring、springMVC、mybatis框架,配置的数据库连接池是阿里的druid。程序执行过程中,传入的sql为`insert into tableName values ('20021015','01','01','2016-01-19-08.57.19.181489','2016-02-19-08.57.19.181489','03','2016-03-19-08.57.19.181489','2016-04-19-08.57.19.181489'),`('20021015','01','02','2016-05-19-08.57.19.181489','2016-06-19-08.57.19.181489','03','2016-07-19-08.57.19.181489','2016-08-19-08.57.19.181489')使用的数据是db2,sql中插入两条记录,那一长串在数据库表中TIMESTAMP类型。日志中ERROR内容如下:`2017-06-01 21:55:26-ERROR com.alibaba.druid.filter.stat.StatFilter:147-mergeSql merge sql error, dbType db2, sql : insert into tableName values ('20021015','01','01','2016-01-19-08.57.19.181489','2016-02-19-08.57.19.181489','03','2016-03-19-08.57.19.181489','2016-04-19-08.57.19.181489'),('20021015','01','02','2016-05-19-08.57.19.181489','2016-06-19-08.57.19.181489','03','2016-07-19-08.57.19.181489','2016-08-19-08.57.19.181489')com.alibaba.druid.sql.parser.ParserException: syntax error, error in :'489','2016-04-19-08.57.19.181489')',expect COMMA, actual COMMA 2016-04-19-08.57.19.181489 at com.alibaba.druid.sql.parser.SQLParser.printError(SQLParser.java:232) at com.alibaba.druid.sql.parser.SQLStatementParser.parseStatementList(SQLStatementParser.java:407) at com.alibaba.druid.sql.parser.SQLStatementParser.parseStatementList(SQLStatementParser.java:145) at com.alibaba.druid.sql.parser.SQLStatementParser.parseStatementList(SQLStatementParser.java:140) at com.alibaba.druid.sql.visitor.ParameterizedOutputVisitorUtils.parameterize(ParameterizedOutputVisitorUtils.java:53) at com.alibaba.druid.filter.stat.StatFilter.mergeSql(StatFilter.java:145) at com.alibaba.druid.filter.stat.StatFilter.createSqlStat(StatFilter.java:630) at com.alibaba.druid.filter.stat.StatFilter.statementPrepareAfter(StatFilter.java:305) at com.alibaba.druid.filter.FilterEventAdapter.connection_prepareStatement(FilterEventAdapter.java:124) at com.alibaba.druid.filter.FilterChainImpl.connection_prepareStatement(FilterChainImpl.java:448) at 
com.alibaba.druid.proxy.jdbc.ConnectionProxyImpl.prepareStatement(ConnectionProxyImpl.java:342) at com.alibaba.druid.pool.DruidPooledConnection.prepareStatement(DruidPooledConnection.java:331) at sun.reflect.GeneratedMethodAccessor49.invoke(Unknown Source) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:498) at org.apache.ibatis.logging.jdbc.ConnectionLogger.invoke(ConnectionLogger.java:55) at com.sun.proxy.$Proxy31.prepareStatement(Unknown Source) at org.apache.ibatis.executor.statement.PreparedStatementHandler.instantiateStatement(PreparedStatementHandler.java:79) at org.apache.ibatis.executor.statement.BaseStatementHandler.prepare(BaseStatementHandler.java:88) at org.apache.ibatis.executor.statement.RoutingStatementHandler.prepare(RoutingStatementHandler.java:58) at org.apache.ibatis.executor.SimpleExecutor.prepareStatement(SimpleExecutor.java:76) at org.apache.ibatis.executor.SimpleExecutor.doUpdate(SimpleExecutor.java:48) at org.apache.ibatis.executor.BaseExecutor.update(BaseExecutor.java:115) at org.apache.ibatis.executor.CachingExecutor.update(CachingExecutor.java:75) at org.apache.ibatis.session.defaults.DefaultSqlSession.update(DefaultSqlSession.java:170) at org.apache.ibatis.session.defaults.DefaultSqlSession.insert(DefaultSqlSession.java:157) at sun.reflect.GeneratedMethodAccessor61.invoke(Unknown Source) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:498) at org.mybatis.spring.SqlSessionTemplate$SqlSessionInterceptor.invoke(SqlSessionTemplate.java:408) at com.sun.proxy.$Proxy13.insert(Unknown Source) at org.mybatis.spring.SqlSessionTemplate.insert(SqlSessionTemplate.java:254) at org.apache.ibatis.binding.MapperMethod.execute(MapperMethod.java:52) at org.apache.ibatis.binding.MapperProxy.invoke(MapperProxy.java:53) at com.sun.proxy.$Proxy14.insert(Unknown Source) at 
com.unionpay.css.db2client.service.impl.CommonServiceImpl.add(CommonServiceImpl.java:21) at com.unionpay.css.db2client.controller.CommonController.db2Operation(CommonController.java:54) at sun.reflect.GeneratedMethodAccessor48.invoke(Unknown Source) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:498) at org.springframework.web.method.support.InvocableHandlerMethod.doInvoke(InvocableHandlerMethod.java:221) at org.springframework.web.method.support.InvocableHandlerMethod.invokeForRequest(InvocableHandlerMethod.java:136) at org.springframework.web.servlet.mvc.method.annotation.ServletInvocableHandlerMethod.invokeAndHandle(ServletInvocableHandlerMethod.java:110) at org.springframework.web.servlet.mvc.method.annotation.RequestMappingHandlerAdapter.invokeHandlerMethod(RequestMappingHandlerAdapter.java:817) at org.springframework.web.servlet.mvc.method.annotation.RequestMappingHandlerAdapter.handleInternal(RequestMappingHandlerAdapter.java:731) at org.springframework.web.servlet.mvc.method.AbstractHandlerMethodAdapter.handle(AbstractHandlerMethodAdapter.java:85) at org.springframework.web.servlet.DispatcherServlet.doDispatch(DispatcherServlet.java:959) at org.springframework.web.servlet.DispatcherServlet.doService(DispatcherServlet.java:893) at org.springframework.web.servlet.FrameworkServlet.processRequest(FrameworkServlet.java:968) at org.springframework.web.servlet.FrameworkServlet.doPost(FrameworkServlet.java:870) at javax.servlet.http.HttpServlet.service(HttpServlet.java:648) at org.springframework.web.servlet.FrameworkServlet.service(FrameworkServlet.java:844) at javax.servlet.http.HttpServlet.service(HttpServlet.java:729) at org.apache.catalina.core.ApplicationFilterChain.internalDoFilter(ApplicationFilterChain.java:292) at org.apache.catalina.core.ApplicationFilterChain.doFilter(ApplicationFilterChain.java:207) at 
org.apache.tomcat.websocket.server.WsFilter.doFilter(WsFilter.java:52) at org.apache.catalina.core.ApplicationFilterChain.internalDoFilter(ApplicationFilterChain.java:240) at org.apache.catalina.core.ApplicationFilterChain.doFilter(ApplicationFilterChain.java:207) at org.springframework.web.filter.CharacterEncodingFilter.doFilterInternal(CharacterEncodingFilter.java:121) at org.springframework.web.filter.OncePerRequestFilter.doFilter(OncePerRequestFilter.java:107) at org.apache.catalina.core.ApplicationFilterChain.internalDoFilter(ApplicationFilterChain.java:240) at org.apache.catalina.core.ApplicationFilterChain.doFilter(ApplicationFilterChain.java:207) at org.apache.catalina.core.StandardWrapperValve.invoke(StandardWrapperValve.java:212) at org.apache.catalina.core.StandardContextValve.invoke(StandardContextValve.java:106) at org.apache.catalina.authenticator.AuthenticatorBase.invoke(AuthenticatorBase.java:502) at org.apache.catalina.core.StandardHostValve.invoke(StandardHostValve.java:141) at org.apache.catalina.valves.ErrorReportValve.invoke(ErrorReportValve.java:79) at org.apache.catalina.valves.AbstractAccessLogValve.invoke(AbstractAccessLogValve.java:616) at org.apache.catalina.core.StandardEngineValve.invoke(StandardEngineValve.java:88) at org.apache.catalina.connector.CoyoteAdapter.service(CoyoteAdapter.java:528) at org.apache.coyote.http11.AbstractHttp11Processor.process(AbstractHttp11Processor.java:1099) at org.apache.coyote.AbstractProtocol$AbstractConnectionHandler.process(AbstractProtocol.java:670) at org.apache.tomcat.util.net.NioEndpoint$SocketProcessor.doRun(NioEndpoint.java:1520) at org.apache.tomcat.util.net.NioEndpoint$SocketProcessor.run(NioEndpoint.java:1476) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) at org.apache.tomcat.util.threads.TaskThread$WrappingRunnable.run(TaskThread.java:61) at 
java.lang.Thread.run(Thread.java:745)`看了下db2,2条记录已经写进去了,不知道为什么会这样。druid版本为1.0.18。
1.元字符'.'用来匹配一个任意字符 2.元字符'\'用来转义字符,与其他元字符在一起时,匹配元字符本身 3.元字符'[]'用来匹配一组字符中的任意一个,如'[abcd]',可以用元字符'-'来表示一个区间,例如'[A-Z]',在字符集合以外的地方,'-'字符表示一个普通的字符,不需要转义,在字符集合里面使用'^'表示取非,例如'[^0-9]',表示除数字之外的任意一个字符 4.匹配空白字符。Windows系统中'\r\n\r\n'匹配空白行,Linux系统中'\n\n'匹配空白行 5.匹配特定字符类别。'\d'表示任意一个数字字符,'\D'表示任意一个非数字字符,'\w'任意一个数字、字母、下划线字符,'\W'则相反,'\s'匹配任意一个空白字符,'\S'则相反。例如: zhang@DESKTOP-CO960ET ~ $ echo "hello" | grep -P '\w' hello zhang@DESKTOP-CO960ET ~ $ echo "hello" | grep -P '\D' hello 6.使用POSIX字符类。[:alnum:]匹配任意一个字母或数字;[:alpha:]匹配任意一个字母;[:blank:]匹配空格或制表符;[:cntrl:]匹配控制字符;[:digit:]匹配任意一个数字;[:lower:]匹配任意一个小写字母;[:space:]匹配任意一个空白字符;[:upper:]匹配任意一个大写字母;[:xdigit:]匹配任意一个十六进制数字,等价于[a-fA-F0-9]。例如: zhang@DESKTOP-CO960ET ~ $ echo "sss" | grep -P '[[:xdigit:]]' zhang@DESKTOP-CO960ET ~ $ echo "sssA" | grep -P '[[:xdigit:]]' sssA zhang@DESKTOP-CO960ET ~ $ echo "12_A" | grep -P '[[:lower:]]' zhang@DESKTOP-CO960ET ~ $ echo "12_A" | grep -P '[[:upper:]]' 12_A zhang@DESKTOP-CO960ET ~ $ echo "12_A" | grep -P '[[:digit:]]' 12_A 7.元字符'+'放在字符或字符集合后面用来匹配一个或多个字符,与'\'在一起时,用来匹配'+'本身。例如: zhang@DESKTOP-CO960ET ~ $ echo "a2" | grep -P -o '\d+' 2 zhang@DESKTOP-CO960ET ~ $ echo "a" | grep -P -o '\d+' zhang@DESKTOP-CO960ET ~ $ echo "a" | grep -P -o '[[:digit:]]+' zhang@DESKTOP-CO960ET ~ $ echo "a2" | grep -P -o '[[:digit:]]+' 2 zhang@DESKTOP-CO960ET ~ $ echo "a2" | grep -P -o '[a2]+' a2 zhang@DESKTOP-CO960ET ~ $ echo "a22" | grep -P -o '[a2]+' a22 8.元字符'*'放在一个字符或字符集后面,就可以匹配该字符或字符集合连续出现零次或多次的情况,与'\'一起使用时,匹配自身;元字符'?'放在字符或字符集合后面时,将匹配一个字符的零次或1次出现,与'\'一起使用时,用来匹配自身。例如: zhang@DESKTOP-CO960ET ~ $ echo "a" | grep -P '2*' a zhang@DESKTOP-CO960ET ~ $ echo "a" | grep -P '2?' 
a zhang@DESKTOP-CO960ET ~ $ echo "a" | grep -P '2+' 9.匹配的重复次数。元字符'{}'放在字符或字符集合后面,表示匹配的次数。具体:'{n}',表示匹配n次;'{n,m}'表示匹配次数为闭区间[n,m];'{n,}'表示匹配至少n次。与'\'一起使用时,表示'{'、'}'字符自身 10.防止过度匹配。贪婪型元字符'*'对应的懒惰型为'*?','+'对应的为'+?','{n,}'对应的为'{n,}?'。区别见例子: zhang@DESKTOP-CO960ET ~ $ echo "AAhelloAA and AAworldAA" | grep -P -o 'AA.*?AA' AAhelloAA AAworldAA zhang@DESKTOP-CO960ET ~ $ echo "AAhelloAA and AAworldAA" | grep -P -o 'AA.*AA' AAhelloAA and AAworldAA zhang@DESKTOP-CO960ET ~ $ echo -e 'I paid $30 for 100 apples,\n50 oranges, and 60 pears.\nI saved $5 on this order' | grep -P -o '\d+' 30 100 50 60 5 zhang@DESKTOP-CO960ET ~ $ echo -e 'I paid $30 for 100 apples,\n50 oranges, and 60 pears.\nI saved $5 on this order' | grep -P -o '\d+?' 3 0 1 0 0 5 0 6 0 5 11.位置匹配。'\b'匹配单词边界,这个位置位于一个能够用来构成单词的字符(字母、数字、下划线)和一个不能用来构成单词的之间。'\B'表明不匹配一个单词边界,例如: zhang@DESKTOP-CO960ET ~ $ echo "Hello cat hhcat sds" | grep -P -o '\bcat' cat zhang@DESKTOP-CO960ET ~ $ echo "Hello cat hhcat sds" | grep -P -o 'cat\b' cat cat zhang@DESKTOP-CO960ET ~ $ echo "Hello cat hhcat sds" | grep -P -o '\bcat\b' cat zhang@DESKTOP-CO960ET ~ $ echo "nine-digit color - coded" | grep -P -o '\B-\B' - 12.'^'和'$'用来指定字符串的边界(字符串的开头和结束)。如果与'(?m)'配合使用,'^'和'$'还将匹配一个在换行符处开头或结束的字符串(此时,换行符被视为字符串分割符) 13.子表达式,用'()'括起来,其中'()'为元字符,匹配自身时,需要转义。子表达式表示对整个内容进行匹配。注意'(ab){2}'与'ab{2}'的区别,前者匹配'ab'整体出现2次,后者是'b'出现2次。子表达式允许嵌套。 14.回溯引用:前后一致匹配。指的是模式的后半部分引用在前部分中定义的子表达式。回溯引用匹配通常从1开始计数,例如:查找连续2次重复的单词 zhang@DESKTOP-CO960ET ~ $ echo -e "This is a block of of text,several words here are are repeated, and and they should not be" | grep -P -o '[ ]+(\w+)[ ]+\1' of of are are and and 15.向前向后查找。向前查找指定了一个必须匹配但不在结果中返回的模式,语法上看向前查找实际上就是一个以'?='开头的子表达式,需要匹配的文字在'='后面。向后查找相反,查找出现在匹配文本之前的字符,但不包括它,向后查找操作符是'?<='。向前查找模式的长度是可变的,它可以包含'.'、'*'之类的元字符,而向后查找模式只能是固定长度。与向前向后查找相反的分别为负向前查找('?!')、负向后查找('?<!') 
zhang@DESKTOP-CO960ET ~ $ echo "http://www.baidu.com" | grep -P -o '.+(?=:)' http zhang@DESKTOP-CO960ET ~ $ echo "http://www.baidu.com" | grep -P -o '.+(:)' http: zhang@DESKTOP-CO960ET ~ $ echo 'AB42: $23.45' | grep -P -o '(?<=\$)[0-9.]+' 23.45 zhang@DESKTOP-CO960ET ~ $ echo 'AB42: $$23.45' | grep -P -o '(?<=\$+)[0-9.]+' grep: lookbehind assertion is not fixed length zhang@DESKTOP-CO960ET ~ $ echo "http://www.baidu.com" | grep -P -o '.+(?=t+)' ht zhang@DESKTOP-CO960ET ~ $ echo "<H1>HelloWorld<H1>" | grep -P -o '(?<=\<H1\>).*(?=\<H1\>)' HelloWorld zhang@DESKTOP-CO960ET ~ $ echo -e 'I paid $30 for 100 apples,\n50 oranges, and 60 pears.\nI saved $5 on this order' | grep -P -o '\b(?<!\$)\d+\b' 100 50 60 zhang@DESKTOP-CO960ET ~ $ echo -e 'I paid $30 for 100 apples,\n50 oranges, and 60 pears.\nI saved $5 on this order' | grep -P -o '\b(?<=\$)\d+\b' 30 5 0-9 ↩
jdbc读db2中记录的A字段时,程序出现以下信息报错:SqlException with message including "Caught java.io.CharConversionException" and ERRORCODE=-4220。不清楚db2中那条记录是怎么来的,在网上查了下,http://www-01.ibm.com/support/docview.wss?uid=swg21684365,解决方法上面也有,原因大概是:SqlException with message including "Caught java.io.CharConversionException" and ERRORCODE=-4220 if the data in a character column that it queries contains a sequence of bytes that is not a valid UTF-8 string. 不过我电脑上的DbVisualizer可以查看该记录对应的字段值,当然是乱码的。不过我不理解的是,DbVisualizer也是通过jdbc来读数据库的,db2驱动也是一样,不管数据是什么,为啥我写的程序读不出来?(数据库中定义A字段是Varchar类型,所以程序里也是通过getString()方法来获取字段值的)
看一段代码,了解下java中编码 import java.io.UnsupportedEncodingException; public class Test{ public static void main(String[] args) throws UnsupportedEncodingException { System.out.println(System.getProperty("file.encoding")); //gbk String s = "中文"; System.out.println(s); System.out.println(s.length()); String t = new String(s.getBytes(),"utf-8"); System.out.println(t); System.out.println(t.length()); } } 其中,Test.java源文件是utf-8编码的,系统默认编码是GBK输出结果如下所示: E:\testencoding>java Test GBK 涓枃 3 中文 2 我的理解是当用javac Test.java命令时,由于没有指定编码格式,jdk采用系统默认的编码格式将源程序编译成unicode字节码,形成class文件保存。本例中,源文件中的字符串"中文"本来是以utf-8编码保存的一串字节流,但是编译时,按照gbk的格式转换成unicode,因此程序执行时输出s时会乱码,刚好“中文”字符串中两个汉字以utf-8格式保存在文件中,各占用3个字节,而gbk编码格式中这两个汉字各占2个字节,所以将6个字节,按照gbk格式转换成java中的字符串时,长度为3。后面将s先按照当前系统的编码格式(gbk)编码,然后按照utf-8格式解码,这与源文件中最初的"中文"编码、解码一致,不会出现乱码的问题了
在网上参考了一些资料,配置windows7系统上远程访问HDFS集群,包括添加系统变量$HADOOP_HOME\、$HADOOP_USER,配置环境变量$HADOOP_HOME\bin,其中bin目录下有hadoop.dll、winutils.exe等程序,这些是从网上找的,但是在eclipse中运行程序遇到了一个问题,程序代码如下: public class App { public static void main( String[] args ) { Configuration conf = new Configuration(); try { FileSystem fs = FileSystem.get(conf); fs.copyFromLocalFile(new Path("D:\\testhdfs.txt"), new Path("/user/hado_cli/dist")); } catch (IOException e) { e.printStackTrace(); System.out.println("Put file to HDFS Failed"); } try { FileSystem fs = FileSystem.get(conf); fs.copyToLocalFile(new Path("/user/hado_cli/dist/testhdfs.txt"), new Path("D:\\hdfstest")); } catch (IOException e) { e.printStackTrace(); System.out.println("Get file from HDFS Failed"); } } } 其中在执行前一段代码,及将本地文件上传至HDFS,程序是没有问题的,但执行从HDFS拷贝文件到我电脑上时,报了错,如下: Exception in thread "main" java.lang.UnsatisfiedLinkError: org.apache.hadoop.io.nativeio.NativeIO$Windows.createFileWithMode0(Ljava/lang/String;JJJI)Ljava/io/FileDescriptor; at org.apache.hadoop.io.nativeio.NativeIO$Windows.createFileWithMode0(Native Method) at org.apache.hadoop.io.nativeio.NativeIO$Windows.createFileOutputStreamWithMode(NativeIO.java:559) at org.apache.hadoop.fs.RawLocalFileSystem$LocalFSFileOutputStream.<init>(RawLocalFileSystem.java:219) at org.apache.hadoop.fs.RawLocalFileSystem$LocalFSFileOutputStream.<init>(RawLocalFileSystem.java:209) at org.apache.hadoop.fs.RawLocalFileSystem.createOutputStreamWithMode(RawLocalFileSystem.java:307) at org.apache.hadoop.fs.RawLocalFileSystem.create(RawLocalFileSystem.java:295) at org.apache.hadoop.fs.RawLocalFileSystem.create(RawLocalFileSystem.java:328) at org.apache.hadoop.fs.ChecksumFileSystem$ChecksumFSOutputSummer.<init>(ChecksumFileSystem.java:393) at org.apache.hadoop.fs.ChecksumFileSystem.create(ChecksumFileSystem.java:456) at org.apache.hadoop.fs.ChecksumFileSystem.create(ChecksumFileSystem.java:435) at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:923) at 
org.apache.hadoop.fs.FileSystem.create(FileSystem.java:904) at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:801) at org.apache.hadoop.fs.FileUtil.copy(FileUtil.java:368) at org.apache.hadoop.fs.FileUtil.copy(FileUtil.java:341) at org.apache.hadoop.fs.FileUtil.copy(FileUtil.java:292) at org.apache.hadoop.fs.FileSystem.copyToLocalFile(FileSystem.java:2017) at org.apache.hadoop.fs.FileSystem.copyToLocalFile(FileSystem.java:1986) at org.apache.hadoop.fs.FileSystem.copyToLocalFile(FileSystem.java:1962) 之前由于从网上找的hadoop.dll这些文件版本与集群上的hadoop版本不一致,在上传文件至HDFS时也报了类似的错误,后来换了个版本,现在下载文件仍然报错。我猜测的原因有以下几点: 版本原因,集群上是hadoop2.6.0-cdh5.9.1,程序用的jar包也是这个版本,hadoop.dll这些文件是从网上找的apache-hadoop2.6.0,是版本不一致还是找的这些dll文件有错,那为什么上传文件却可以? 程序验证了下,是可以读HDFS的文件的,但是看报错好像是在我电脑上无法创建输出流,难道没有权限吗? 万能的网友帮忙看看吧