
Day 61 (Employment Class): Lucene Index Optimization, Built-in Analyzers, the IK Analyzer, Keyword Highlighting, Single- and Multi-Field Search, and an Introduction to EasyUI

2017-04-11 14:04
01_Review



02_Index Library Optimization_File Count and Size
2.1 Optimizing the index library
1) What is an index library?
The index library is Lucene's core storage structure. It consists of two parts: the original-record table and the term (vocabulary) table.
Original-record table: stores the original records; Lucene assigns a unique number to every record it stores.
Term table: stores the terms produced by the analyzer, together with the numbers of the original records that contain each term.
2) Why does the index library need optimizing?
By default, each time a Document object is added to the index library, Lucene writes another binary compressed file with the extension *.cfs. The more Document objects you store, the more *.cfs files accumulate and the larger the index library grows, so both the file count and the overall size become a problem.
3) Index optimization approaches (see the code below)
Article.java
package com.xp.lucene.entity;

public class Article {
    private Integer id;        // id
    private String title;      // title
    private String content;    // content
    private Integer count;     // word count

    public Article(){}

    public Article(Integer id, String title, String content, Integer count) {
        this.id = id;
        this.title = title;
        this.content = content;
        this.count = count;
    }
    public Integer getId() {
        return id;
    }
    public void setId(Integer id) {
        this.id = id;
    }
    public String getTitle() {
        return title;
    }
    public void setTitle(String title) {
        this.title = title;
    }
    public String getContent() {
        return content;
    }
    public void setContent(String content) {
        this.content = content;
    }
    public Integer getCount() {
        return count;
    }
    public void setCount(Integer count) {
        this.count = count;
    }
    @Override
    public String toString() {
        return "编号:" + id + " 标题:" + title + " 内容:" + content + " 字数:" + count;
    }
}
LuceneUtil.java
package com.xp.lucene.util;

import java.io.File;
import java.lang.reflect.Method;

import org.apache.commons.beanutils.BeanUtils;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Field.Index;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.index.IndexWriter.MaxFieldLength;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;

public class LuceneUtil {
    private static Directory directory;
    private static Version version;
    private static Analyzer analyzer;
    private static MaxFieldLength maxFieldLength;

    static {
        try {
            directory = FSDirectory.open(new File("E:/IndexDBDBDB"));
            version = Version.LUCENE_30;
            analyzer = new StandardAnalyzer(version);
            maxFieldLength = MaxFieldLength.LIMITED;
        } catch (Exception e) {
            e.printStackTrace();
            throw new RuntimeException(e);
        }
    }

    public static Directory getDirectory() {
        return directory;
    }

    public static Version getVersion() {
        return version;
    }

    public static Analyzer getAnalyzer() {
        return analyzer;
    }

    public static MaxFieldLength getMaxFieldLength() {
        return maxFieldLength;
    }

    // Prevent callers from instantiating this helper class
    private LuceneUtil(){}

    // Convert a JavaBean into a Document object
    public static Document javabean2document(Object obj) throws Exception {
        // Create the Document object
        Document document = new Document();
        // Get the Class object of the bean referenced by obj
        Class clazz = obj.getClass();
        // Get the private fields declared by the class
        java.lang.reflect.Field[] reflectFields = clazz.getDeclaredFields();
        // Iterate over the fields
        for (java.lang.reflect.Field reflectField : reflectFields) {
            // Allow access to private fields
            reflectField.setAccessible(true);
            // Get the field name, e.g. id/title/content
            String name = reflectField.getName();
            // Build the getter name by hand
            String methodName = "get" + name.substring(0, 1).toUpperCase() + name.substring(1);
            // Look up the getter, e.g. getId()/getTitle()/getContent()
            Method method = clazz.getMethod(methodName);
            // Invoke the getter
            String value = method.invoke(obj).toString();
            // Add the value to the Document; the Document field names match the JavaBean property names
            document.add(new Field(name, value, Store.YES, Index.ANALYZED));
        }
        // Return the Document object
        return document;
    }

    // Convert a Document object back into a JavaBean
    public static Object document2javabean(Document document, Class clazz) throws Exception {
        Object obj = clazz.newInstance();
        java.lang.reflect.Field[] reflectFields = clazz.getDeclaredFields();
        for (java.lang.reflect.Field reflectField : reflectFields) {
            reflectField.setAccessible(true);
            String name = reflectField.getName();      // id/title/content
            String value = document.get(name);         // e.g. 1 / 培训 / 传智是一家培训机构
            BeanUtils.setProperty(obj, name, value);   // populate the matching property via its setXxx() method
        }
        return obj;
    }
}
package com.xp.lucene.optimize;

import java.io.File;
import java.util.ArrayList;
import java.util.List;

import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.RAMDirectory;
import org.junit.Test;

import com.xp.lucene.entity.Article;
import com.xp.lucene.util.LuceneUtil;

/**
 * Index library optimization
 */
public class ArticleDao {
    /**
     * Add a Document object to the index library
     * (illustrates the problem)
     */
    @Test
    public void add() throws Exception {
        Article article = new Article(1, "培训", "传智是一家it培训机构", 10);
        Document document = LuceneUtil.javabean2document(article);
        IndexWriter indexWriter = new IndexWriter(LuceneUtil.getDirectory(), LuceneUtil.getAnalyzer(), LuceneUtil.getMaxFieldLength());
        indexWriter.addDocument(document);
        indexWriter.close();
    }

    /**
     * Merge the cfs files; the merged file is still binary compressed data.
     * This solves the file-size and file-count problems.
     * @throws Exception
     */
    @Test
    public void type1() throws Exception {
        Article article = new Article(1, "培训", "传智是一家it培训机构", 10);
        Document document = LuceneUtil.javabean2document(article);
        IndexWriter indexWriter = new IndexWriter(LuceneUtil.getDirectory(), LuceneUtil.getAnalyzer(), LuceneUtil.getMaxFieldLength());
        indexWriter.addDocument(document);
        // Merge the cfs files
        indexWriter.optimize();
        indexWriter.close();
    }

    /**
     * Set the merge factor so cfs files are merged automatically
     * @throws Exception
     */
    @Test
    public void type2() throws Exception {
        Article article = new Article(1, "培训", "传智是一家it培训机构", 10);
        Document document = LuceneUtil.javabean2document(article);
        IndexWriter indexWriter = new IndexWriter(LuceneUtil.getDirectory(), LuceneUtil.getAnalyzer(), LuceneUtil.getMaxFieldLength());
        indexWriter.addDocument(document);
        // Set the merge factor so cfs files are merged automatically.
        // By default every 10 cfs files are merged into one; here every 3 cfs files trigger a merge.
        indexWriter.setMergeFactor(3);
        indexWriter.close();
    }

    /**
     * By default, every 10 cfs files are merged into one
     * @throws Exception
     */
    @Test
    public void type3() throws Exception {
        Article article = new Article(1, "培训", "传智是一家it培训机构", 10);
        Document document = LuceneUtil.javabean2document(article);
        IndexWriter indexWriter = new IndexWriter(LuceneUtil.getDirectory(), LuceneUtil.getAnalyzer(), LuceneUtil.getMaxFieldLength());
        indexWriter.addDocument(document);
        // Set the merge factor, i.e. merge every 10 cfs files
        //indexWriter.setMergeFactor(10);
        indexWriter.close();
    }
03_Index Library Optimization_Speed (the ArticleDao class above continues below)
    /**
     * Use RAMDirectory, an in-memory index library, to solve the speed problem of reading
     * index files from disk. It trades memory for speed: access is fast, but the contents are
     * not persistent. So on startup the on-disk index library is loaded into the in-memory one,
     * and on shutdown the in-memory index library is written back to disk, without duplicating content.
     * Think about it: where in a project could this in-memory index library idea be used to speed things up?
     * @throws Exception
     */
    @Test
    public void type4() throws Exception {
        Article article = new Article(1, "培训", "传智是一家it培训机构", 10);
        Document document = LuceneUtil.javabean2document(article);

        // On-disk index library
        Directory fsDirectory = FSDirectory.open(new File("E:/IndexDBDBDB"));

        // In-memory index library, initialized from the on-disk one so the existing content is loaded into memory
        Directory ramDirectory = new RAMDirectory(fsDirectory);

        // Writer pointing at the on-disk index library. The boolean true means create/overwrite:
        // the existing on-disk index is cleared, so the Document objects copied back from the
        // in-memory index library replace what was there before (which also avoids duplicates).
        // With false the writer opens the existing index and appends; the boolean goes in the
        // disk writer's constructor.
        IndexWriter fsIndexWriter = new IndexWriter(fsDirectory, LuceneUtil.getAnalyzer(), true, LuceneUtil.getMaxFieldLength());

        // Writer pointing at the in-memory index library
        IndexWriter ramIndexWriter = new IndexWriter(ramDirectory, LuceneUtil.getAnalyzer(), LuceneUtil.getMaxFieldLength());

        // Write the Document into the in-memory index library
        ramIndexWriter.addDocument(document);
        ramIndexWriter.close();

        // Copy all Document objects from the in-memory index library into the on-disk one
        fsIndexWriter.addIndexesNoOptimize(ramDirectory);
        fsIndexWriter.close();
    }

    @Test
    public void findAll() throws Exception {
        String keywords = "家";
        List<Article> articleList = new ArrayList<Article>();

        QueryParser queryParser = new QueryParser(LuceneUtil.getVersion(), "content", LuceneUtil.getAnalyzer());
        Query query = queryParser.parse(keywords);
        IndexSearcher indexSearcher = new IndexSearcher(LuceneUtil.getDirectory());
        TopDocs topDocs = indexSearcher.search(query, 100);
        for (int i = 0; i < topDocs.scoreDocs.length; i++) {
            ScoreDoc scoreDoc = topDocs.scoreDocs[i];
            int no = scoreDoc.doc;
            Document document = indexSearcher.doc(no);
            Article article = (Article) LuceneUtil.document2javabean(document, Article.class);
            articleList.add(article);
        }
        for (Article a : articleList) {
            System.out.println(a);
        }
    }
}
04_Lucene's Built-in Analyzers
4.1 What is an analyzer?
An analyzer uses an algorithm to split Chinese or English text into individual terms, which are later matched against the keywords the user types in.
4.2 Why do we need an analyzer?
What the user types is just a keyword taken from some text, and it usually differs from the content stored in the original-record table. A search engine still has to find the relevant content, so an analyzer is used to match the original records as closely as possible.
4.3 How an analyzer works
Step 1: split the text into terms according to the analyzer's rules
Step 2: remove stop words and forbidden words
Step 3: convert English letters to lower case, so searches are case-insensitive
4.4 Analyzer example (figure): "传智播客说我们的首都是北京呀I AM zhaojun"
4.5 Demo of the common analyzers; just observe the output
package com.xp.lucene.analyzer;

import java.io.StringReader;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.wltea.analyzer.lucene.IKAnalyzer;

/**
 * Test the tokenization produced by Lucene's built-in analyzers and by third-party analyzers
 */
public class TestAnalyzer {
    private static void testAnalyzer(Analyzer analyzer, String text) throws Exception {
        System.out.println("Current analyzer: " + analyzer.getClass());
        TokenStream tokenStream = analyzer.tokenStream("content", new StringReader(text));
        tokenStream.addAttribute(TermAttribute.class);
        while (tokenStream.incrementToken()) {
            TermAttribute termAttribute = tokenStream.getAttribute(TermAttribute.class);
            System.out.println(termAttribute.term());
        }
    }

    public static void main(String[] args) throws Exception {
        // Lucene's built-in analyzers
        //testAnalyzer(new StandardAnalyzer(LuceneUtil.getVersion()), "传智播客说我们的首都是北京呀it");
        //testAnalyzer(new FrenchAnalyzer(LuceneUtil.getVersion()), "传智播客说我们的首都是北京呀it");
        //testAnalyzer(new RussianAnalyzer(LuceneUtil.getVersion()), "传智播客说我们的首都是北京呀it");
        //testAnalyzer(new ChineseAnalyzer(), "传智播客说我们的首都是北京呀it");
        //testAnalyzer(new CJKAnalyzer(LuceneUtil.getVersion()), "传智播客说我们的首都是北京呀it");
        //testAnalyzer(new CJKAnalyzer(LuceneUtil.getVersion()), "传智是一家IT培训机构");
        //testAnalyzer(new FrenchAnalyzer(LuceneUtil.getVersion()), "传智是一家how are you培训机构");
        testAnalyzer(new IKAnalyzer(), "传智播客说我们的首都是北京呀");
        //testAnalyzer(new IKAnalyzer(), "上海自来水来自海上");
    }
}
05_The IK Analyzer
Use the third-party IKAnalyzer analyzer - the first choice for Chinese.
Requirement: filter out "说", "的" and "呀" from the example above, and treat "传智播客" as a single keyword (treating "传智播客" as a single keyword was not verified in class).
Step 1: import the IKAnalyzer core jar, IKAnalyzer3.2.0Stable.jar
Step 2: copy IKAnalyzer.cfg.xml, stopword.dic and your own xxx.dic into the project's src directory (in MyEclipse), then edit the configuration; note that the first line is expected to be blank.
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE properties SYSTEM "http://java.sun.com/dtd/properties.dtd">
<properties>
    <comment>IK Analyzer extension configuration</comment>
    <!-- Configure your own extension dictionary here -->
    <entry key="ext_dict">/mydict.dic</entry>
    <!-- Configure your own extension stop-word dictionary here -->
    <entry key="ext_stopwords">/surname.dic</entry>
</properties>
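To use IK for real indexing and searching, and not only in the analyzer test above, the analyzer created in LuceneUtil's static block can simply be switched from StandardAnalyzer to IKAnalyzer. The sketch below is an equivalent, minimal illustration that passes an IKAnalyzer explicitly; it assumes the Article and LuceneUtil classes from the earlier sections, and the class name IKAnalyzerDemo and the sample keyword are made up for illustration only.

package com.xp.lucene.analyzer;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriter.MaxFieldLength;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TopDocs;
import org.wltea.analyzer.lucene.IKAnalyzer;

import com.xp.lucene.entity.Article;
import com.xp.lucene.util.LuceneUtil;

public class IKAnalyzerDemo {
    public static void main(String[] args) throws Exception {
        // IKAnalyzer reads IKAnalyzer.cfg.xml from the classpath, so the extension
        // dictionary and stop-word dictionary configured above take effect
        Analyzer ikAnalyzer = new IKAnalyzer();

        // Index one article using IK for tokenization
        Article article = new Article(1, "培训", "传智播客说我们的首都是北京呀", 10);
        Document document = LuceneUtil.javabean2document(article);
        IndexWriter indexWriter = new IndexWriter(LuceneUtil.getDirectory(), ikAnalyzer, MaxFieldLength.LIMITED);
        indexWriter.addDocument(document);
        indexWriter.close();

        // Search with the same analyzer, so query terms are tokenized the same way
        QueryParser queryParser = new QueryParser(LuceneUtil.getVersion(), "content", ikAnalyzer);
        Query query = queryParser.parse("北京");
        IndexSearcher indexSearcher = new IndexSearcher(LuceneUtil.getDirectory());
        TopDocs topDocs = indexSearcher.search(query, 100);
        System.out.println("hits: " + topDocs.totalHits);
        indexSearcher.close();
    }
}

The key point is that indexing and searching must use the same analyzer; otherwise the query terms and the indexed terms will not line up.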
06_Highlighting Keywords in Search Results
What is search-result highlighting?
In the search results, the characters that match the keyword are displayed in red.
package com.xp.lucene.highlighter;

import java.util.ArrayList;
import java.util.List;

import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.highlight.Formatter;
import org.apache.lucene.search.highlight.Highlighter;
import org.apache.lucene.search.highlight.QueryScorer;
import org.apache.lucene.search.highlight.Scorer;
import org.apache.lucene.search.highlight.SimpleHTMLFormatter;
import org.junit.Test;

import com.xp.lucene.entity.Article;
import com.xp.lucene.util.LuceneUtil;

/**
 * Highlighting keywords in the search results
 */
public class ArticleDao {
    /**
     * Add a Document object to the index library
     */
    @Test
    public void add() throws Exception {
        Article article = new Article(1, "培训", "传智是一家it培训机构", 10);
        Document document = LuceneUtil.javabean2document(article);
        IndexWriter indexWriter = new IndexWriter(LuceneUtil.getDirectory(), LuceneUtil.getAnalyzer(), LuceneUtil.getMaxFieldLength());
        indexWriter.addDocument(document);
        indexWriter.close();
    }

    @Test
    public void findAll() throws Exception {
        String keywords = "培训";
        List<Article> articleList = new ArrayList<Article>();
        QueryParser queryParser = new QueryParser(LuceneUtil.getVersion(), "content", LuceneUtil.getAnalyzer());
        Query query = queryParser.parse(keywords);
        IndexSearcher indexSearcher = new IndexSearcher(LuceneUtil.getDirectory());
        TopDocs topDocs = indexSearcher.search(query, 100);
        // The code below highlights the parts of the content that match the keyword
        // Formatter: the markup wrapped around the matched text
        Formatter formatter = new SimpleHTMLFormatter("<font color='red'>", "</font>");
        // Scorer: ties the highlighter to the keywords in the query
        Scorer scorer = new QueryScorer(query);
        // The highlighter itself
        Highlighter highlighter = new Highlighter(formatter, scorer);
        for (int i = 0; i < topDocs.scoreDocs.length; i++) {
            ScoreDoc scoreDoc = topDocs.scoreDocs[i];
            int no = scoreDoc.doc;
            // Document without highlighting
            Document document = indexSearcher.doc(no);
            // Highlight the keyword in each field
            String titleHighlighter = highlighter.getBestFragment(LuceneUtil.getAnalyzer(), "title", document.get("title"));
            String contentHighlighter = highlighter.getBestFragment(LuceneUtil.getAnalyzer(), "content", document.get("content"));
            // Put the highlighted values back into the Document
            document.getField("title").setValue(titleHighlighter);
            document.getField("content").setValue(contentHighlighter);
            Article article = (Article) LuceneUtil.document2javabean(document, Article.class);
            articleList.add(article);
        }
        for (Article a : articleList) {
            System.out.println(a);
        }
    }
}
What is a search-result summary (snippet)?
If a result's content is too long and we only want to show its first few characters, we use a summary. It must be used together with the highlighter, as in the method below (Fragmenter and SimpleFragmenter come from org.apache.lucene.search.highlight).
// A findAll-style test that returns only a short highlighted snippet of the content (the method name is arbitrary)
@Test
public void findAllWithSummary() throws Exception {
    String keywords = "培训";
    List<Article> articleList = new ArrayList<Article>();
    QueryParser queryParser = new QueryParser(LuceneUtil.getVersion(), "content", LuceneUtil.getAnalyzer());
    Query query = queryParser.parse(keywords);
    IndexSearcher indexSearcher = new IndexSearcher(LuceneUtil.getDirectory());
    TopDocs topDocs = indexSearcher.search(query, 1000000);

    Formatter formatter = new SimpleHTMLFormatter("<font color='red'>", "</font>");
    Scorer scorer = new QueryScorer(query);
    Highlighter highlighter = new Highlighter(formatter, scorer);

    // Fragmenter: keep only a short fragment of the content (here 4 characters) as the summary
    Fragmenter fragmenter = new SimpleFragmenter(4);
    highlighter.setTextFragmenter(fragmenter);

    for (int i = 0; i < topDocs.scoreDocs.length; i++) {
        ScoreDoc scoreDoc = topDocs.scoreDocs[i];
        int no = scoreDoc.doc;
        Document document = indexSearcher.doc(no);

        String highlighterContent = highlighter.getBestFragment(LuceneUtil.getAnalyzer(), "content", document.get("content"));
        document.getField("content").setValue(highlighterContent);

        Article article = (Article) LuceneUtil.document2javabean(document, Article.class);
        articleList.add(article);
    }
    for (Article article : articleList) {
        System.out.println(article);
    }
}
07_Single-Field and Multi-Field Sorting
Sorting search results
5.1 What is result sorting?
The search results are displayed in an order determined by one or more fields.
5.2 Many factors influence how a site ranks in search engines:
head/meta tags
clean page markup
page execution speed
using div+css layout
......
(covered in more depth in the web-marketing course)
5.3 In Lucene, the display order is tied to the relevance score
ScoreDoc.score
By default Lucene sorts by relevance score: higher scores come first, lower scores come later.
If two documents have the same relevance score, they are ordered by when they were added to the index library.
5.4 Setting the relevance score (boost) in Lucene
Article article = new Article(1, "培训", "传智是一家it培训机构", 10);
Document document = LuceneUtil.javabean2document(article);
IndexWriter indexWriter = new IndexWriter(LuceneUtil.getDirectory(), LuceneUtil.getAnalyzer(), LuceneUtil.getMaxFieldLength());
// Boost this document so it scores higher (the default boost is 1.0)
document.setBoost(20F);
indexWriter.addDocument(document);
indexWriter.close();
5.5 Sorting by a single field in Lucene
Sort sort = new Sort(new SortField("id", SortField.INT, true));
TopDocs topDocs = indexSearcher.search(query, null, 1000000, sort);
5.6 Sorting by multiple fields in Lucene
Sort sort = new Sort(new SortField("count", SortField.INT, true), new SortField("id", SortField.INT, true));
TopDocs topDocs = indexSearcher.search(query, null, 1000000, sort);
With multi-field sorting, the second field only takes effect when two results have the same value for the first field.
Prefer numeric fields for sorting. A complete sorted search is sketched below.
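The two Sort snippets above are easiest to see in context. The following is a minimal sketch of a full sorted query, assuming the Article and LuceneUtil classes and the index contents from the earlier sections; the class and method names are made up for illustration. It sorts by count descending, then by id descending when counts are equal.

package com.xp.lucene.sort;

import java.util.ArrayList;
import java.util.List;

import org.apache.lucene.document.Document;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TopDocs;
import org.junit.Test;

import com.xp.lucene.entity.Article;
import com.xp.lucene.util.LuceneUtil;

public class SortedSearchDemo {
    @Test
    public void findAllSorted() throws Exception {
        List<Article> articleList = new ArrayList<Article>();
        QueryParser queryParser = new QueryParser(LuceneUtil.getVersion(), "content", LuceneUtil.getAnalyzer());
        Query query = queryParser.parse("培训");
        IndexSearcher indexSearcher = new IndexSearcher(LuceneUtil.getDirectory());

        // Sort by count descending; when two hits have the same count, sort by id descending
        Sort sort = new Sort(new SortField("count", SortField.INT, true),
                             new SortField("id", SortField.INT, true));
        TopDocs topDocs = indexSearcher.search(query, null, 1000000, sort);

        for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
            Document document = indexSearcher.doc(scoreDoc.doc);
            articleList.add((Article) LuceneUtil.document2javabean(document, Article.class));
        }
        indexSearcher.close();

        for (Article article : articleList) {
            System.out.println(article);
        }
    }
}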
08_Single-Field and Multi-Field Search
1) Conditional search
6.1 What is a conditional search?
Matching the keyword against one specified column, or against several columns.
6.2 Single-field search
QueryParser queryParser = new QueryParser(LuceneUtil.getVersion(),"content",LuceneUtil.getAnalyzer());
6.3 Multi-field search; real projects usually search across multiple fields (a complete sketch follows)
QueryParser queryParser = new MultiFieldQueryParser(LuceneUtil.getVersion(),new String[]{"content","title"},LuceneUtil.getAnalyzer());
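A minimal multi-field search sketch, again assuming the Article and LuceneUtil classes from the earlier sections; the class and method names are made up for illustration. A document matches when the keyword appears in either the title field or the content field.

package com.xp.lucene.multifield;

import java.util.ArrayList;
import java.util.List;

import org.apache.lucene.document.Document;
import org.apache.lucene.queryParser.MultiFieldQueryParser;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.junit.Test;

import com.xp.lucene.entity.Article;
import com.xp.lucene.util.LuceneUtil;

public class MultiFieldSearchDemo {
    @Test
    public void findByTitleOrContent() throws Exception {
        List<Article> articleList = new ArrayList<Article>();
        // The keyword only has to appear in one of the listed fields for a document to match
        QueryParser queryParser = new MultiFieldQueryParser(LuceneUtil.getVersion(),
                new String[]{"content", "title"}, LuceneUtil.getAnalyzer());
        Query query = queryParser.parse("培训");
        IndexSearcher indexSearcher = new IndexSearcher(LuceneUtil.getDirectory());
        TopDocs topDocs = indexSearcher.search(query, 100);
        for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
            Document document = indexSearcher.doc(scoreDoc.doc);
            articleList.add((Article) LuceneUtil.document2javabean(document, Article.class));
        }
        indexSearcher.close();
        for (Article article : articleList) {
            System.out.println(article);
        }
    }
}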

1) Use a third-party utility class to convert a JavaBean, a List or a Map<String,Object> into JSON text
Import the third-party jars:
》commons-beanutils-1.7.0.jar
》commons-collections-3.1.jar
》commons-lang-2.5.jar
》commons-logging-1.1.1.jar
》ezmorph-1.0.3.jar
》json-lib-2.1-jdk15.jar
(1) JavaBean -> JSON
》JSONArray jsonArray = JSONArray.fromObject(city);
》String jsonJAVA = jsonArray.toString();
(2) List<JavaBean> -> JSON
》JSONArray jsonArray = JSONArray.fromObject(cityList);
》String jsonJAVA = jsonArray.toString();
(3) List<String> -> JSON
》JSONArray jsonArray = JSONArray.fromObject(stringList);
》String jsonJAVA = jsonArray.toString();
(4) Map<String,Object> -> JSON (important)
List<User> userList = new ArrayList<User>();
userList.add(new User(100, "哈哈", 1000));
userList.add(new User(200, "呵呵", 2000));
userList.add(new User(300, "嘻嘻", 3000));

Map<String, Object> map = new LinkedHashMap<String, Object>();
map.put("total", userList.size());
map.put("rows", userList);

JSONArray jsonArray = JSONArray.fromObject(map);
String jsonJAVA = jsonArray.toString();
System.out.println(jsonJAVA);    // prints a JSON array: [{"total":3,"rows":[...]}]

// The DataGrid expects a JSON object rather than an array, so strip the surrounding [ and ]
jsonJAVA = jsonJAVA.substring(1, jsonJAVA.length() - 1);
System.out.println(jsonJAVA);    // prints the JSON object: {"total":3,"rows":[...]}

2) Create a DataGrid dynamically from a JSON file
<table id="dg"></table>
$('#dg').datagrid({
    url : 'data/datagrid_data.json',
    columns : [[
        {field:'code',title:'编号',width:100},
        {field:'name',title:'姓名',width:100},
        {field:'price',title:'薪水',width:100}
    ]]
});

3) Create a DataGrid dynamically from JSON text returned by a Servlet
<table id="dg"></table>
$('#dg').datagrid({
    url : '/lucene-day02/JsonServlet',
    columns : [[
        {field:'code',title:'编号',width:100},
        {field:'name',title:'姓名',width:100},
        {field:'price',title:'薪水',width:100}
    ]]
});
Servlet:
public void doPost(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
    request.setCharacterEncoding("UTF-8");

    Integer currPageNO = null;
    try {
        // The DataGrid sends a "page" parameter to the server: the page number being requested
        currPageNO = Integer.parseInt(request.getParameter("page"));
    } catch (Exception e) {
        currPageNO = 1;
    }
    // The DataGrid also sends a "rows" parameter: the number of records per page
    //Integer rows = Integer.parseInt(request.getParameter("rows"));
    //System.out.println(currPageNO + ":" + rows);

    UserService userService = new UserService();
    PageBean pageBean = userService.fy(currPageNO);

    Map<String, Object> map = new LinkedHashMap<String, Object>();
    map.put("total", pageBean.getAllRecordNO());
    map.put("rows", pageBean.getUserList());

    JSONArray jsonArray = JSONArray.fromObject(map);
    String jsonJAVA = jsonArray.toString();
    jsonJAVA = jsonJAVA.substring(1, jsonJAVA.length() - 1);

    System.out.println(jsonJAVA);
    response.setContentType("text/html;charset=UTF-8");
    response.getWriter().write(jsonJAVA);
    response.getWriter().flush();
    response.getWriter().close();
}
Use JSP + JS + jQuery + EasyUI + Servlet + Lucene to implement paged search results
Step 1: create the ArticleDao.java class
public class ArticleDao {
    public Integer getAllObjectNum(String keywords) throws Exception {
        QueryParser queryParser = new QueryParser(LuceneUtil.getVersion(), "content", LuceneUtil.getAnalyzer());
        Query query = queryParser.parse(keywords);
        IndexSearcher indexSearcher = new IndexSearcher(LuceneUtil.getDirectory());
        // Only 3 hits are fetched here, but totalHits still reports the total number of matches
        TopDocs topDocs = indexSearcher.search(query, 3);
        return topDocs.totalHits;
    }
    public List<Article> findAllObjectWithFY(String keywords, Integer start, Integer size) throws Exception {
        List<Article> articleList = new ArrayList<Article>();
        QueryParser queryParser = new QueryParser(LuceneUtil.getVersion(), "content", LuceneUtil.getAnalyzer());
        Query query = queryParser.parse(keywords);
        IndexSearcher indexSearcher = new IndexSearcher(LuceneUtil.getDirectory());
        TopDocs topDocs = indexSearcher.search(query, 100000000);
        // The page ends either after "size" records or at the last hit, whichever comes first
        int middle = Math.min(start + size, topDocs.totalHits);
        for (int i = start; i < middle; i++) {
            ScoreDoc scoreDoc = topDocs.scoreDocs[i];
            int no = scoreDoc.doc;
            Document document = indexSearcher.doc(no);
            Article article = (Article) LuceneUtil.document2javabean(document, Article.class);
            articleList.add(article);
        }
        return articleList;
    }
}

Step 2: create the PageBean.java class
public class PageBean {
    private Integer allObjectNum;    // total number of matching records
    private Integer allPageNum;      // total number of pages
    private Integer currPageNum;     // current page number
    private Integer perPageNum = 2;  // records per page
    private List<Article> articleList = new ArrayList<Article>();   // records shown on the current page

    public PageBean(){}

    public Integer getAllObjectNum() {
        return allObjectNum;
    }
    public void setAllObjectNum(Integer allObjectNum) {
        this.allObjectNum = allObjectNum;
        // Derive the total page count from the total record count
        if (this.allObjectNum % this.perPageNum == 0) {
            this.allPageNum = this.allObjectNum / this.perPageNum;
        } else {
            this.allPageNum = this.allObjectNum / this.perPageNum + 1;
        }
    }
    public Integer getAllPageNum() {
        return allPageNum;
    }
    public void setAllPageNum(Integer allPageNum) {
        this.allPageNum = allPageNum;
    }
    public Integer getCurrPageNum() {
        return currPageNum;
    }
    public void setCurrPageNum(Integer currPageNum) {
        this.currPageNum = currPageNum;
    }
    public Integer getPerPageNum() {
        return perPageNum;
    }
    public void setPerPageNum(Integer perPageNum) {
        this.perPageNum = perPageNum;
    }
    public List<Article> getArticleList() {
        return articleList;
    }
    public void setArticleList(List<Article> articleList) {
        this.articleList = articleList;
    }
}

Step 3: create the ArticleService.java class
public class ArticleService {
    private ArticleDao articleDao = new ArticleDao();
    public PageBean fy(String keywords, Integer currPageNum) throws Exception {
        PageBean pageBean = new PageBean();
        pageBean.setCurrPageNum(currPageNum);
        // Total number of hits (setting it also computes the total page count inside PageBean)
        Integer allObjectNum = articleDao.getAllObjectNum(keywords);
        pageBean.setAllObjectNum(allObjectNum);
        // Compute the slice [start, start + size) for the requested page
        Integer size = pageBean.getPerPageNum();
        Integer start = (pageBean.getCurrPageNum() - 1) * size;
        List<Article> articleList = articleDao.findAllObjectWithFY(keywords, start, size);
        pageBean.setArticleList(articleList);
        return pageBean;
    }
}

Step 4: create the ArticleServlet.java class
public class ArticleServlet extends HttpServlet {
    public void doPost(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
        try {
            // Get the current page number, defaulting to 1
            String strCurrPageNO = request.getParameter("page");
            if (strCurrPageNO == null) {
                strCurrPageNO = "1";
            }
            Integer currPageNO = Integer.parseInt(strCurrPageNO);
            // Get the search keyword
            String keywords = request.getParameter("keywords");
            // Create the service object
            ArticleService articleService = new ArticleService();
            // Call the service layer
            PageBean pageBean = articleService.fy(keywords, currPageNO);
            // The code below builds the JSON text that the DataGrid expects
            Map<String, Object> map = new LinkedHashMap<String, Object>();
            // Total number of records
            map.put("total", pageBean.getAllObjectNum());
            // Records shown on this page
            map.put("rows", pageBean.getArticleList());
            JSONArray jsonArray = JSONArray.fromObject(map);
            String jsonJAVA = jsonArray.toString();
            jsonJAVA = jsonJAVA.substring(1, jsonJAVA.length() - 1);
            // Write the JSON text back to the browser for the DataGrid component
            response.setContentType("text/html;charset=UTF-8");
            response.getWriter().write(jsonJAVA);
            response.getWriter().flush();
            response.getWriter().close();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}

Step 5: import the directory containing the EasyUI-related js files

Step 6: create list.jsp under the WebRoot directory

<%@ page language="java" pageEncoding="UTF-8"%>
<%@ taglib uri="http://java.sun.com/jsp/jstl/core" prefix="c" %>
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head>
    <link rel="stylesheet" href="themes/default/easyui.css" type="text/css"></link>
    <link rel="stylesheet" href="themes/icon.css" type="text/css"></link>
    <script type="text/javascript" src="js/jquery.min.js"></script>
    <script type="text/javascript" src="js/jquery.easyui.min.js"></script>
    <script type="text/javascript" src="locale/easyui-lang-zh_CN.js"></script>
</head>

<body>

    输入关键字:
    <input type="text" size="4px" id="name"/>
    <input type="button" value="搜索" id="find"/>

    <table id="dg" style="width:500px"></table>

    <script type="text/javascript">
        // Find the "搜索" (search) button and attach a click handler
        $("#find").click(function(){
            // Read the keyword the user typed
            var name = $("#name").val();
            // Trim surrounding whitespace
            name = $.trim(name);
            // Reload the DataGrid with the latest keyword
            $("#dg").datagrid("load",{
                "keywords" : name
            });
        });
    </script>

    <script type="text/javascript">
        // Create the DataGrid dynamically
        $("#dg").datagrid({
            url : '${pageContext.request.contextPath}/ArticleServlet?id=' + new Date().getTime(),
            fitColumns : true,
            singleSelect : true,
            columns:[[
                {field:'id',title:'编号',width:100,align:'center'},
                {field:'title',title:'标题',width:100,align:'center'},
                {field:'content',title:'内容',width:100,align:'center'}
            ]],
            pagination : true,
            pageNumber : 1,
            pageSize : 2,
            pageList : [2]
        });
    </script>
</body>
</html>