http://www.cnblogs.com/gzggyy/archive/2013/05/14/3077510.html
2016年9月25日 # jfinal+freemarker+jquery mobile 開發出web應用, 然后找個殼子打包成apk應用。
http://www.cnblogs.com/gzggyy/archive/2013/05/14/3077510.html 2016年5月10日 #
ElasticSearch各個版本的api
https://www.elastic.co/guide/en/elasticsearch/client/java-api/2.2/index.html 2016年5月4日 # dubbo是阿里巴巴的框架,主要有4部分組成,1 服務臺提供方, 2 服務注冊方 3 服務消費分 4 監控部分 1. 注冊方一般用zookeeper, 先下載安裝,啟動zkservece.cmd 會報錯,需要修改配置文件 zoo.cfg. 路徑中conf/下,,沒有自己加一個。 正常啟動 2. 編寫一個服務器端, 創建maven項目 pom.xml文件 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> <modelVersion>4.0.0</modelVersion> <groupId>DubboService</groupId> <artifactId>DubboService</artifactId> <version>0.0.1-SNAPSHOT</version> <build/> <dependencies> <dependency> <groupId>junit</groupId> <artifactId>junit</artifactId> <version>3.8.1</version> <scope>test</scope> </dependency> <dependency> <groupId>commons-logging</groupId> <artifactId>commons-logging</artifactId> <version>1.1.1</version> </dependency> <dependency> <groupId>com.alibaba</groupId> <artifactId>dubbo</artifactId> <version>2.5.3</version> </dependency> <dependency> <groupId>org.javassist</groupId> <artifactId>javassist</artifactId> <version>3.18.1-GA</version> </dependency> <dependency> <groupId>log4j</groupId> <artifactId>log4j</artifactId> <version>1.2.15</version> <exclusions> <exclusion> <groupId>com.sun.jdmk</groupId> <artifactId>jmxtools</artifactId> </exclusion> <exclusion> <groupId>com.sun.jmx</groupId> <artifactId>jmxri</artifactId> </exclusion> <exclusion> <artifactId>jms</artifactId> <groupId>javax.jms</groupId> </exclusion> <exclusion> <artifactId>mail</artifactId> <groupId>javax.mail</groupId> </exclusion> </exclusions> </dependency> <dependency> <groupId>org.springframework</groupId> <artifactId>spring</artifactId> <version>2.5.6.SEC03</version> </dependency> <dependency> <groupId>org.slf4j</groupId> <artifactId>slf4j-api</artifactId> <version>1.7.6</version> </dependency> <dependency> <groupId>org.slf4j</groupId> <artifactId>slf4j-log4j12</artifactId> <version>1.6.1</version> </dependency> <dependency> 
<groupId>org.apache.zookeeper</groupId> <artifactId>zookeeper</artifactId> <version>3.4.5</version> <type>pom</type> </dependency> <dependency> <groupId>com.101tec</groupId> <artifactId>zkclient</artifactId> <version>0.4</version> </dependency> </dependencies> <repositories> <repository> <id>spring-snapshots</id> <url>http://repo.spring.io/libs-snapshot</url> </repository> </repositories> </project> config/applicationProvider.xml 配置文件,里面定義了注冊的bean, 和zookeeper的地址 <?xml version="1.0" encoding="UTF-8"?> <beans xmlns="http://www.springframework.org/schema/beans" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:dubbo="http://code.alibabatech.com/schema/dubbo" xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans.xsd http://code.alibabatech.com/schema/dubbo http://code.alibabatech.com/schema/dubbo/dubbo.xsd "> <dubbo:application name="hello-world" /> <!-- 注冊地址 --> <dubbo:registry address="zookeeper://localhost:2181" /> <dubbo:protocol name="dubbo" port="20880" /> <!-- Service interface Concurrent Control --> <dubbo:service interface="cn.zto.service.IProcessData" ref="demoService" executes="10" /> <!-- designate implementation --> <bean id="demoService" class="cn.zto.service.impl.ProcessDataImpl" /> </beans> IProcessData定義接口及實現類 package cn.zto.service.impl; import cn.zto.service.IProcessData; public class ProcessDataImpl implements IProcessData { public String hello(String name) { System.out.println(name); return "hello : " + name; }} package cn.zto.service; 啟動服務public interface IProcessData { public String hello(String name); } package cn.zto.app; 運行起來。如下import org.springframework.context.support.ClassPathXmlApplicationContext; public class Main { public static void main(String[] args) throws Exception { ClassPathXmlApplicationContext context=new ClassPathXmlApplicationContext( new String[] { "config/applicationProvider.xml" }); context.start(); System.out.println("按任意鍵退出"); System.in.read(); 
}} log4j:WARN No appenders could be found for logger (org.springframework.context.support.ClassPathXmlApplicationContext). log4j:WARN Please initialize the log4j system properly. 按任意鍵退出 下面再建一個client程序 pom.xml <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> <modelVersion>4.0.0</modelVersion> <groupId>DubboClient</groupId> <artifactId>DubboClient</artifactId> <version>0.0.1-SNAPSHOT</version> <build/> <dependencies> <dependency> <groupId>junit</groupId> <artifactId>junit</artifactId> <version>3.8.1</version> <scope>test</scope> </dependency> <dependency> <groupId>commons-logging</groupId> <artifactId>commons-logging</artifactId> <version>1.1.1</version> </dependency> <dependency> <groupId>com.alibaba</groupId> <artifactId>dubbo</artifactId> <version>2.5.3</version> </dependency> <dependency> <groupId>org.javassist</groupId> <artifactId>javassist</artifactId> <version>3.18.1-GA</version> </dependency> <dependency> <groupId>log4j</groupId> <artifactId>log4j</artifactId> <version>1.2.15</version> <exclusions> <exclusion> <groupId>com.sun.jdmk</groupId> <artifactId>jmxtools</artifactId> </exclusion> <exclusion> <groupId>com.sun.jmx</groupId> <artifactId>jmxri</artifactId> </exclusion> <exclusion> <artifactId>jms</artifactId> <groupId>javax.jms</groupId> </exclusion> <exclusion> <artifactId>mail</artifactId> <groupId>javax.mail</groupId> </exclusion> </exclusions> </dependency> <dependency> <groupId>org.springframework</groupId> <artifactId>spring</artifactId> <version>2.5.6.SEC03</version> </dependency> <dependency> <groupId>org.slf4j</groupId> <artifactId>slf4j-api</artifactId> <version>1.7.6</version> </dependency> <dependency> <groupId>org.slf4j</groupId> <artifactId>slf4j-log4j12</artifactId> <version>1.6.1</version> </dependency> <dependency> <groupId>org.apache.zookeeper</groupId> 
<artifactId>zookeeper</artifactId> <version>3.4.5</version> <type>pom</type> </dependency> <dependency> <groupId>com.101tec</groupId> <artifactId>zkclient</artifactId> <version>0.4</version> </dependency> </dependencies> <repositories> <repository> <id>spring-snapshots</id> <url>http://repo.spring.io/libs-snapshot</url> </repository> </repositories> </project> 注冊的接口類,和要服務器端的包路徑一致 package cn.zto.service; public interface IProcessData { public String hello(String name); } 客戶端的配置文件,接口定義及zookeeper的地址 <?xml version="1.0" encoding="UTF-8"?> <beans xmlns="http://www.springframework.org/schema/beans" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:dubbo="http://code.alibabatech.com/schema/dubbo" xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans.xsd http://code.alibabatech.com/schema/dubbo http://code.alibabatech.com/schema/dubbo/dubbo.xsd "> <dubbo:application name="consumer-of-helloworld-app" /> <!-- 注冊地址 --> <dubbo:registry address="zookeeper://localhost:2181"/> <dubbo:consumer timeout="5000" /> <dubbo:reference id="demoService" interface="cn.zto.service.IProcessData"/> </beans> 客戶端啟動 package cn.zto.consumer; import org.springframework.context.support.ClassPathXmlApplicationContext; import cn.zto.service.IProcessData; public class ConsumerThd{ public void sayHello(){ ClassPathXmlApplicationContext context=new ClassPathXmlApplicationContext( new String[] {"config/applicationProvider.xml"}); context.start(); IProcessData demoService=(IProcessData) context.getBean("demoService"); System.out.println(demoService.hello("world")); } public static void main(String args[]){ new ConsumerThd().sayHello(); } } 運行結果如下 log4j:WARN No appenders could be found for logger (org.springframework.context.support.ClassPathXmlApplicationContext). log4j:WARN Please initialize the log4j system properly. hello : world 這樣就基本實現了dubbo的框架,很簡單吧
spring boot 是spring 4.0提供的微框架,支持 jdk 1.8, maven 3以上, 否則會報一些錯誤。
1. pom 文件, 主要寫依賴關系, <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> <modelVersion>4.0.0</modelVersion> <groupId>com.example</groupId> <artifactId>spring-boot</artifactId> <version>0.0.1-SNAPSHOT</version> <name>spring-boot-example</name> <parent> <groupId>org.springframework.boot</groupId> <artifactId>spring-boot-starter-parent</artifactId> <version>1.3.3.RELEASE</version> </parent> <!-- Add typical dependencies for a web application --> <dependencies> <dependency> <groupId>org.springframework.boot</groupId> <artifactId>spring-boot-starter-web</artifactId> </dependency> </dependencies> <repositories> <repository> <id>spring-snapshots</id> <url>http://repo.spring.io/libs-snapshot</url> </repository> </repositories> <pluginRepositories> <pluginRepository> <id>spring-snapshots</id> <url>http://repo.spring.io/libs-snapshot</url> </pluginRepository> </pluginRepositories> <build> <plugins> <plugin> <groupId>org.springframework.boot</groupId> <artifactId>spring-boot-maven-plugin</artifactId> </plugin> </plugins> </build> </project> 2. 然后就是提供的接口類UserController, 實體類User, 主要是幾個注解 @RestController l類同spring mvc 的@Controller @RequestMapping 類同spring mvc package two; import org.springframework.web.bind.annotation.RequestMapping; import org.springframework.web.bind.annotation.RestController; @RestController @RequestMapping("/user") public class UserController { @RequestMapping("/hello") public User view(){ User user=new User(); user.setId((long)100); user.setName("fanjs"); return user; } package two; public class User { private Long id; private String name; public Long getId() { return id; } public void setId(Long id) { this.id = id; } public String getName() { return name; } public void setName(String name) { this.name = name; } 3. 
然后就是程序啟動,這里不需要spring xml文件,完全依賴注解。 @EnableAutoConfiguration @Configuration @ComponentScan package two; import org.springframework.boot.SpringApplication; import org.springframework.boot.autoconfigure.EnableAutoConfiguration; import org.springframework.context.annotation.ComponentScan; import org.springframework.context.annotation.Configuration; @Configuration @ComponentScan @EnableAutoConfiguration public class RunMain { /** * @param args */ public static void main(String[] args) { // TODO Auto-generated method stub SpringApplication.run(RunMain.class, args); } } 4. 測試路徑 http://localhost:8080/user/hello {"id":100,"name":"fanjs"} 1、maven包查詢: http://mvnrepository.com/ http://mirrors.ibiblio.org/pub/mirrors/maven2/ http://gradle.artifactoryonline.com/gradle/plugins http://www.jarvana.com/jarvana/browse/ http://maven.alfresco.com/nexus/content/groups/public 2016年5月3日 # facet 自己理解就是分組聚合用的, 如下說明 http://blog.csdn.net/a925907195/article/details/47257243 Solr中的group與facet的區別 如果是簡單的使用的話,那么Facet與group都可以用來進行數據的聚合查詢,但是他們還是有很大的區別的。 首先上facet跟group的操作: Facet的例子: public voidFacetFieldQuery() throws Exception { solrServer = createSolrServer(); SolrQueryquery = newSolrQuery();//建立一個新的查詢 query.setQuery("jobsName:計算機維護"); query.setFacet(true);//設置facet=on // 分類信息分為:薪水,發布時間,教育背景,工作經驗,公司類型,工作類型 query.addFacetField(new String[] {"salary","publishDate", "educateBackground","jobExperience","companytype","jobsType" });//設置需要facet的字段 query.setFacetLimit(10);// 限制facet返回的數量 query.setFacetMissing(false);//不統計null的值 query.setFacetMinCount(1);// 設置返回的數據中每個分組的數據最小值,比如設置為1,則統計數量最小為1,不然不顯示
//query.addFacetQuery("publishDate:[2014-04-11T00:00:00Z TO 2014-04-13T00:00:00Z]");
QueryResponse response = solrServer.query(query);
System.out.println("查詢時間:" + response.getQTime());
List<FacetField> facets = response.getFacetFields(); // 返回的facet列表
for (FacetField facet : facets) {
    System.out.println(facet.getName());
    System.out.println("----------------");
    List<Count> counts = facet.getValues();
    for (Count count : counts) {
        System.out.println(count.getName() + ":" + count.getCount());
    }
    System.out.println();
}
} 運行結果如下: 查詢時間:66 salary ---------------- 面議:6882 2001-4000:1508 其他:671 4001-6000:536 3000-4499:224 2000-2999:181 6001-8000:179 3000-5000:82 1000-2000:81 4500-5999:75
publishDate ---------------- 2014-08-05T00:00:00Z:793 2014-08-04T00:00:00Z:775 2014-07-30T00:00:00Z:601 2014-08-07T00:00:00Z:548 2014-08-06T00:00:00Z:539 2014-08-11T00:00:00Z:472 2014-08-20T00:00:00Z:439 2014-08-12T00:00:00Z:438 2014-08-01T00:00:00Z:405 2014-08-03T00:00:00Z:376
educateBackground ---------------- 大專:4486 本科:1872 其他:1344 不限:1147 中專:680 高中:472 薪水范圍::430 中技:161 初中:140 碩士:94
jobExperience ---------------- 其他:2623 不限:2249 1-3年:1770 1年:1301 2年:773 3-4年:528 3-5年:379 應屆畢業生:309 5-7年:162 1年以上:136
companytype ---------------- 民營公司:3702 民營:2605 國企:835 股份制企業:729 其他:707 合資:632 外資(非歐美):377 外商獨資:350 外資(歐美):271 上市公司:228
jobsType ---------------- 全職:10734 兼職:59 實習:39
Group查詢:
/**
 * Group-query example: asks Solr to group the documents matching
 * "jobsName:計算機維護" by the "salary" field and prints each group's
 * value and total hit count.
 *
 * @throws Exception on any Solr client/transport failure
 */
public void GroupFieldQuery() throws Exception {
    solrServer = createSolrServer();
    SolrQuery query = new SolrQuery("jobsName:計算機維護");
    // turn on the result-grouping feature for this request
    query.setParam(GroupParams.GROUP, true);
    // group on the salary field
    query.setParam(GroupParams.GROUP_FIELD, "salary");
    // return at most 1 document per group
    query.setParam(GroupParams.GROUP_LIMIT, "1");
    // NOTE(review): the original comment claimed rows is set to 0 because only
    // counts are needed, but the code actually requests 10 rows — confirm intent.
    query.setRows(10);
    QueryResponse response = solrServer.query(query);
    if (response != null) {
        GroupResponse groupResponse = response.getGroupResponse();
        if (groupResponse != null) {
            // one GroupCommand per grouped field; each holds the groups found
            List<GroupCommand> groupList = groupResponse.getValues();
            for (GroupCommand groupCommand : groupList) {
                List<Group> groups = groupCommand.getValues();
                for (Group group : groups) {
                    System.out.println("group查詢..." + group.getGroupValue() + "數量為:" + group.getResult().getNumFound());
                }
            }
        }
    }
}
group查詢...面議數量為:6882 group查詢...4500-5999數量為:75 group查詢...2001-4000數量為:1508 group查詢...其他數量為:671 group查詢...2000-2999數量為:181 group查詢...4001-6000數量為:536 group查詢...2000-4000數量為:19 group查詢...2000-3000數量為:34 group查詢...3000-4499數量為:224 group查詢...3000-5000數量為:82
facet的查詢結果主要是分組信息:有什么分組,每個分組包括多少記錄;但是分組中有哪些數據是不可知道的,只有進一步搜索。 The Grouping feature only works if groups are in the same shard. You must use the custom sharding feature to use the Grouping feature.
兩者其實用起來還是有比較大的區別的,但是如果說區別的話可以看下wiki上的這段 Field Collapsing and Result Grouping are different ways to think about the same Solr feature. Field Collapsing collapses a group of results with the same field value down to a single (or fixed number) of entries. For example, most search engines such as Google collapse on site so only one or two entries are shown, along with a link to click to see more results from that site. Field collapsing can also be used to suppress duplicate documents. Result Grouping groups documents with a common field value into groups, returning the top documents per group, and the top groups based on what documents are in the groups. One example is a search at Best Buy for a common term such as DVD, that shows the top 3 results for each category ("TVs & Video", "Movies", "Computers", etc)
下面這兩個查詢語句一個是facet的一個是group的 http://localhost:8080/solr/JobsOtherWeb0/select?q=jobsName%3A%E8%AE%A1%E7%AE%97%E6%9C%BA%E7%BB%B4%E6%8A%A4&group=true&group.field=salary&group.limit=1&rows=10
其中facet查詢出的如下:(只截取部分結果)
根據條件查詢出的是查詢結果,facet是聚類后的信息跟查詢條件是分開的,查詢結果也跟facet沒關系。 但是下面看group查詢的
也就是你的查詢條件是跟group相關的,返回的查詢結果也是跟group相關的,比如說你想要查詢的結果在每個分組中 都有數據采集,那么就最好用group,這樣出來的數據跟group也是相關的,但是有個問題,比如說你要查詢group每個采集1個,ok那么你查詢的 時候的條件rows就無效了(也不能說無效,主要是看你怎么使用),就是最多每個分組給你返回一個,多了沒有了。 再細說點就是如果你想查詢歸查詢聚類歸聚類,那么使用facet,如果想使用類似采集的效果,每個group分組采集多少個,那么使用group查詢。 2014年1月6日 # [root@f2c node_work]# cat ServiceRoute.js
/*************************
 * Service routing interface: an HTTP front end that bridges requests
 * into RabbitMQ as an RPC call (request queue in, per-request reply queue out).
 * @author
 **************************/
var http = require('http'),
    url = require('url'),
    amqplib = require('amqplib'),
    async = require('async'),
    uuid = require('node-uuid');

// Single shared AMQP connection promise; broker address is hard-coded.
var open = require('amqplib').connect('amqp://10.0.16.101:5672');

http.createServer(function(req, res){
    /*** parameter check ***/
    var param = url.parse(req.url).query;
    if(param==null || param=="") {
        res.writeHead(200, {'Content-Type':'text/html'});
        res.write("no message", 'utf8');
        res.end();
    }
    else {
        /*** parameter handling: "<queueName>=<base64 payload>[&<cbKey>=<cbName>]" ***/
        console.log("*****************start*******************");
        var paramArr = param.split("&");
        var messageInfo = paramArr[0].split("=");
        var queueName = messageInfo[0];
        // Payload arrives base64-encoded. NOTE(review): `new Buffer` is the
        // legacy pre-Buffer.from API — deprecated in later Node versions.
        var b = new Buffer(messageInfo[1], 'base64');
        var mes = b.toString();
        console.log("*param="+new Date().toLocaleString());
        console.log("*param="+param);
        console.log("*request message = "+mes);
        // Optional second parameter carries a JSONP callback name.
        var callBackInfo = null;
        if(paramArr.length>=2)
            callBackInfo = paramArr[1].split("=");
        /*** reply queue: unique, non-durable, auto-deleted per request ***/
        var uuIdStr = uuid.v1()+"_a";
        var common_options = {durable: false, autoDelete:true, exclusive:true};
        /*** consume the reply ***/
        open.then(function(conn){
            var ok = conn.createChannel();
            ok = ok.then(function(ch){
                ch.assertQueue(uuIdStr, common_options);
                var onSecond = 1000 * 1;
                // 1-second timeout guard in case no reply ever arrives.
                var timer=setTimeout(function(){
                    console.log("*setTimeOut");
                    // NOTE(review): res.end() is never called on this timeout
                    // path, so the HTTP response is left hanging — confirm.
                    res.write('{"s":-1, "error":"channel connect time out"}', 'utf8');
                    ch.close();
                }, onSecond);
                ch.consume(uuIdStr, function(msg) {
                    console.log("*response="+msg.content.toString());
                    ch.close();
                    clearTimeout(timer)
                    /*** return the reply to the client (JSONP-wrapped if a callback was given) ***/
                    if(callBackInfo!=null)
                        res.write(callBackInfo[1]+"('"+msg.content.toString().replace("'", "\'")+"')", 'utf8');
                    else
                        res.write(msg.content.toString(), 'utf8');
                    res.end();
                });
                ch.on("error", function(err){
                    console.log("*response error="+err);
                });
                ch.on("close", function(){
                    console.log("*response close method is called");
                });
            });
        }).then(null, console.warn);
        /*** publish the request, tagged with the reply-queue id ***/
        open.then(function(conn) {
            var ok = conn.createChannel();
            ok = ok.then(function(ch){
                ch.sendToQueue(queueName, new Buffer(mes), {correlationId:uuIdStr});
                ch.close();
                // NOTE(review): `ok` is a promise, not the channel — calling
                // ok.close here looks unintended; confirm.
                ok.then(ok.close.bind(ok));
            });
        }).then(null, console.warn);
    }
}).listen(8081);
2013年11月23日 #
netty是一套高性能的通訊架構,這里我用netty實現http服務器實現信息采集功能。主要是利用他現有的handler處理器,解析出request頭,做信息采集使用,重寫了他自己的handler.
package io.netty.example.http.snoop;
import static io.netty.handler.codec.http.HttpHeaders.getHost;
import static io.netty.handler.codec.http.HttpHeaders.isKeepAlive;
import static io.netty.handler.codec.http.HttpHeaders.Names.CONNECTION;
import static io.netty.handler.codec.http.HttpHeaders.Names.CONTENT_LENGTH;
import static io.netty.handler.codec.http.HttpHeaders.Names.CONTENT_TYPE;
import static io.netty.handler.codec.http.HttpHeaders.Names.COOKIE;
import static io.netty.handler.codec.http.HttpHeaders.Names.SET_COOKIE;
import static io.netty.handler.codec.http.HttpResponseStatus.BAD_REQUEST;
import static io.netty.handler.codec.http.HttpResponseStatus.OK;
import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.Map.Entry;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.handler.codec.http.Cookie;
import io.netty.handler.codec.http.CookieDecoder;
import io.netty.handler.codec.http.DefaultFullHttpResponse;
import io.netty.handler.codec.http.FullHttpResponse;
import io.netty.handler.codec.http.HttpContent;
import io.netty.handler.codec.http.HttpHeaders;
import io.netty.handler.codec.http.HttpObject;
import io.netty.handler.codec.http.HttpRequest;
import io.netty.handler.codec.http.LastHttpContent;
import io.netty.handler.codec.http.QueryStringDecoder;
import io.netty.handler.codec.http.ServerCookieEncoder;
import io.netty.util.CharsetUtil;

/**
 * Netty inbound handler that collects request information (hostname, URI and
 * query parameters) into a text buffer, echoes it back to the client, and
 * appends it to a text file once the last HTTP content chunk arrives.
 */
public class HttpSnoopServiceTxt extends SimpleChannelInboundHandler<Object> {
    private HttpRequest request;
    /** Buffer that stores the response content */
    private final StringBuilder buf = new StringBuilder();

    @Override
    protected void channelRead0(ChannelHandlerContext ctx, Object msg) throws Exception {
        if (msg instanceof HttpRequest) {
            // Local shadows the field on purpose: both refer to the same request.
            HttpRequest request = this.request = (HttpRequest) msg;
            // Reset the buffer at the start of each new request.
            buf.setLength(0);
            // hostname
            buf.append("HOSTNAME:").append(getHost(request, "unknown"));
            // url
            buf.append("REQUEST_URI:").append(request.getUri());
            // query parameters, one PARAM:key=value entry per value
            QueryStringDecoder queryStringDecoder = new QueryStringDecoder(request.getUri());
            Map<String, List<String>> params = queryStringDecoder.parameters();
            if (!params.isEmpty()) {
                for (Entry<String, List<String>> p : params.entrySet()) {
                    String key = p.getKey();
                    List<String> vals = p.getValue();
                    for (String val : vals) {
                        buf.append("PARAM:").append(key).append("=")
                            .append(val);
                    }
                }
            }
            //cookie
        }
        if (msg instanceof HttpContent) {
            // Only on the final chunk: answer the client and persist the record.
            if (msg instanceof LastHttpContent) {
                LastHttpContent trailer = (LastHttpContent) msg;
                writeResponse(trailer, ctx);
                // NOTE(review): printtxt is not defined in the WriterFile class
                // shown later in these notes — confirm it exists elsewhere.
                WriterFile.printtxt(buf.toString());
            }
        }
    }

    @Override
    public void channelReadComplete(ChannelHandlerContext ctx) throws Exception {
        ctx.flush();
    }

    /**
     * Writes the collected buffer back as a plain-text HTTP response.
     * Returns true when the connection is keep-alive.
     * NOTE(review): when keepAlive is false nothing here closes the channel —
     * confirm whether the connection is meant to stay open.
     */
    private boolean writeResponse(HttpObject currentObj,ChannelHandlerContext ctx) {
        boolean keepAlive = isKeepAlive(request);
        FullHttpResponse response = new DefaultFullHttpResponse(HTTP_1_1,
            currentObj.getDecoderResult().isSuccess() ? OK : BAD_REQUEST,
            Unpooled.copiedBuffer(buf.toString(), CharsetUtil.UTF_8));
        response.headers().set(CONTENT_TYPE, "text/plain; charset=UTF-8");
        if (keepAlive) {
            // Keep-alive responses must carry an explicit content length.
            response.headers().set(CONTENT_LENGTH, response.content().readableBytes());
            response.headers().set(CONNECTION, HttpHeaders.Values.KEEP_ALIVE);
        }
        ctx.write(response);
        return keepAlive;
    }
}
/*
 * Copyright 2012 The Netty Project
 *
 * The Netty Project licenses this file to you under the Apache License,
 * version 2.0 (the "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the * License for the specific language governing permissions and limitations * under the License. */ package io.netty.example.http.snoop; import io.netty.bootstrap.ServerBootstrap; import io.netty.channel.Channel; import io.netty.channel.EventLoopGroup; import io.netty.channel.nio.NioEventLoopGroup; import io.netty.channel.socket.nio.NioServerSocketChannel; /** * An HTTP server that sends back the content of the received HTTP request * in a pretty plaintext form. */ public class HttpSnoopServer { private final int port; public HttpSnoopServer(int port) { this.port = port; } public void run() throws Exception { // Configure the server. EventLoopGroup bossGroup = new NioEventLoopGroup(); EventLoopGroup workerGroup = new NioEventLoopGroup(); try { ServerBootstrap b = new ServerBootstrap(); b.group(bossGroup, workerGroup) .channel(NioServerSocketChannel.class) .childHandler(new HttpSnoopServerInitializer()); Channel ch = b.bind(port).sync().channel(); ch.closeFuture().sync(); } finally { bossGroup.shutdownGracefully(); workerGroup.shutdownGracefully(); } } public static void main(String[] args) throws Exception { int port; if (args.length > 0) { port = 8080; } else { port = 8080; } new HttpSnoopServer(port).run(); } } /* * Copyright 2012 The Netty Project * * The Netty Project licenses this file to you under the Apache License, * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. 
 */
package io.netty.example.http.snoop;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelPipeline;
import io.netty.channel.socket.SocketChannel;
import io.netty.handler.codec.http.HttpRequestDecoder;
import io.netty.handler.codec.http.HttpResponseEncoder;

/**
 * Configures the pipeline of every accepted connection for the snoop server:
 * HTTP request decoder, HTTP response encoder, and the collecting handler.
 */
public class HttpSnoopServerInitializer extends ChannelInitializer<SocketChannel> {
    @Override
    public void initChannel(SocketChannel ch) throws Exception {
        // Create a default pipeline implementation.
        ChannelPipeline p = ch.pipeline();

        // Uncomment the following line if you want HTTPS
        //SSLEngine engine = SecureChatSslContextFactory.getServerContext().createSSLEngine();
        //engine.setUseClientMode(false);
        //p.addLast("ssl", new SslHandler(engine));

        p.addLast("decoder", new HttpRequestDecoder());
        // Uncomment the following line if you don't want to handle HttpChunks.
        //p.addLast("aggregator", new HttpObjectAggregator(1048576));
        p.addLast("encoder", new HttpResponseEncoder());
        // Remove the following line if you don't want automatic content compression.
        //p.addLast("deflater", new HttpContentCompressor());
        // Custom handler that records request info to a text file.
        p.addLast("handler", new HttpSnoopServiceTxt());
        //p.addLast("handler", new HttpSnoopServerHandler());
    }
}
電商系統需要記錄用戶行為,需要一個高并發高速寫入文件,考慮利用緩存和nio機制寫入數據,具體邏輯是2塊緩存區,一塊寫數據,一塊寫文件,交替進行,并且利用nio機制一次寫入數據。
測試結果: 1億條數據用時93秒,生產58個100m文件。每一條953納秒。 package io.netty.example.http.snoop;
import java.io.FileOutputStream; import java.io.RandomAccessFile; import java.nio.ByteBuffer; import java.nio.CharBuffer; import java.nio.MappedByteBuffer; import java.nio.channels.FileChannel; import java.nio.charset.Charset; import java.nio.charset.CharsetDecoder; import java.nio.charset.CharsetEncoder; import java.text.ParseException; import java.text.SimpleDateFormat; import java.util.Date; public class WriterFile { // 指定大小為 1024 的緩沖區 public static ByteBuffer bytebufferone = ByteBuffer.allocate(102400000); public static ByteBuffer bytebuffertwo = ByteBuffer.allocate(102400000); public static boolean checkbuffer =true; public static void main(String[] args) { long start = System.nanoTime(); for(int i=0;i<100000000;i++){ if(checkbuffer) processone("123abc"+i+"\r\n"); else prcesstwo("123abc"+i+"\r\n"); } long end = System.nanoTime(); System.out.println((end - start)+"耗時"); } /** * bytebuffertwo寫日志 */ public static void prcesstwo(String log) { //寫bytebuff boolean onecheck=checkposition(log,bytebuffertwo); if(onecheck) writerbuffer(log,bytebuffertwo); //寫文件 else{ checkbuffer=true; writerbuffer(log,bytebufferone); writerfile(bytebuffertwo); } } /** * bytebufferone寫日志 * @param log */ public static void processone(String log) { //寫bytebuff boolean onecheck=checkposition(log,bytebufferone); if(onecheck){ writerbuffer(log,bytebufferone); } //寫文件 else{ checkbuffer=false; writerbuffer(log,bytebuffertwo); writerfile(bytebufferone); } } /** * 判斷緩存是否可以寫下日志 * @param log * @return */ public static boolean checkposition(String log,ByteBuffer bytebuffer) { if(2*log.getBytes().length>bytebuffer.limit()-bytebuffer.position()) { return false; } else { return true; } } /** * 寫日志到緩存,并且返回緩存指針位置 * @param log * @return */ public static int writerbuffer(String log,ByteBuffer bytebuffer ) { for (int i = 0; i < log.length(); i++) { bytebuffer.putChar(log.charAt(i)); } return bytebuffer.position(); } /** * 寫文件 * @param filename */ public static void writerfile(ByteBuffer bytebuffer) { try{ 
FileOutputStream fos = new FileOutputStream(Datefile()); FileChannel fc = fos.getChannel(); bytebuffer.flip(); fc.write(bytebufferone); fc.close(); fos.close(); bytebuffer.clear(); } catch(Exception ex) { ex.printStackTrace(); } } /** * 文件名按日期生產 * @param str * @return */ public static String Datefile() { SimpleDateFormat format = new SimpleDateFormat("yyyyMMdd_HHmmss"); String str = format.format(new Date()); return "d:/test/"+str+".txt"; } } 附帶一個普通的nio讀寫 public static void test() { try{ FileOutputStream fos = new FileOutputStream("d:/nio.txt"); // 得到文件通道 FileChannel fc = fos.getChannel(); // 指定大小為 1024 的緩沖區 ByteBuffer bf = ByteBuffer.allocate(1024); // 要寫入文件的字符串 String greeting = "Hello111"; // 把以上字符串逐字放入緩沖區 for (int i = 0; i < greeting.length(); i++) { bf.putChar(greeting.charAt(i)); } // 記得執行這個方法,使得 position=0, limit=30, 才能寫入正確的數據 // 否則 position 為 30, limit 為 1024,將會把 30 之后的全部空數據(0) 填到文件中 System.out.println(greeting.getBytes().length); System.out.println(bf.position()); System.out.println(bf.limit()); bf.flip(); // 緩沖區數據寫入到文件中,會把緩沖區中從 position 到 limit 之間的數據寫入文件 fc.write(bf); fc.close(); // 關閉文件通道 fos.close(); // 關閉文件輸出流 }catch(Exception e){ e.printStackTrace(); } } |