简单爬虫的实现原理
2009-09-05 14:30
357 次查看
import java.io.BufferedReader;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.net.URL;
import java.net.URLConnection;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
public class Test {

    /**
     * Entry point: downloads the Sina homepage, prints it, then scans the
     * page for opening anchor ({@code <a ...>}) tags and prints each tag
     * together with the URL extracted from its href attribute.
     *
     * @param args unused
     */
    public static void main(String[] args) {
        try {
            URL url = new URL("http://www.sina.com.cn");
            InputStream in = url.openStream();
            // The page is served in GBK; decode with that charset.
            String content = pipe(in, "gbk");
            System.out.println(content);
            // Rough match for an opening <a ...> tag: 'a'/'A' followed by a
            // run of characters commonly found inside an anchor tag.
            String regex = "<[aA](\\s|\\w|\\.|/|#|\\?|&|=|%|:|,|;|-|\\)|\\(|'|\")*>";
            Pattern pattern = Pattern.compile(regex);
            Matcher matcher = pattern.matcher(content);
            while (matcher.find()) {
                System.out.println(matcher.group());
                System.out.println(getURL(matcher.group()));
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /**
     * Extracts the value of the first {@code href="..."} attribute from an
     * anchor tag. The input is lower-cased first, so the returned URL is
     * lower-case as well.
     *
     * @param content an anchor tag such as {@code <a href="http://x">}
     * @return the href value, or "" when no well-formed href is present
     */
    public static String getURL(String content) {
        String lower = content.toLowerCase();
        int start = lower.indexOf("href=\"");
        if (start == -1) {
            // No href attribute at all; the old `s > 0` check missed this
            // because -1 + 6 is still positive.
            return "";
        }
        start += "href=\"".length();          // skip past href="
        int end = lower.indexOf('"', start);  // closing quote of the value
        if (end == -1) {
            return "";                        // unterminated attribute
        }
        return lower.substring(start, end);
    }

    /**
     * Reads the whole stream with the given charset, mirrors every
     * non-empty line to ../index.html (UTF-8), and returns the text with
     * lines separated by '\n'.
     *
     * @param in      stream to consume; closed on return
     * @param charset charset used to decode the stream; defaults to UTF-8
     *                when null or empty
     * @return the decoded page content
     * @throws IOException on any read or write failure
     */
    public static String pipe(InputStream in, String charset) throws IOException {
        if (charset == null || charset.isEmpty()) {
            charset = "utf-8";
        }
        StringBuilder sb = new StringBuilder();
        // try-with-resources closes both the reader (and thus `in`) and the
        // mirror file even when an exception is thrown mid-read.
        try (BufferedReader reader = new BufferedReader(new InputStreamReader(in, charset));
             PrintWriter pw = new PrintWriter(
                     new OutputStreamWriter(new FileOutputStream("../index.html"), "utf-8"))) {
            String line;
            while ((line = reader.readLine()) != null) {
                if (!line.isEmpty()) {
                    sb.append(line).append('\n');
                    pw.println(line);
                }
            }
        }
        return sb.toString();
    }
}
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.net.URL;
import java.net.URLConnection;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
public class Test {

    /**
     * Entry point: downloads the Sina homepage, prints it, then scans the
     * page for opening anchor ({@code <a ...>}) tags and prints each tag
     * together with the URL extracted from its href attribute.
     *
     * @param args unused
     */
    public static void main(String[] args) {
        try {
            URL url = new URL("http://www.sina.com.cn");
            InputStream in = url.openStream();
            // The page is served in GBK; decode with that charset.
            String content = pipe(in, "gbk");
            System.out.println(content);
            // Rough match for an opening <a ...> tag: 'a'/'A' followed by a
            // run of characters commonly found inside an anchor tag.
            String regex = "<[aA](\\s|\\w|\\.|/|#|\\?|&|=|%|:|,|;|-|\\)|\\(|'|\")*>";
            Pattern pattern = Pattern.compile(regex);
            Matcher matcher = pattern.matcher(content);
            while (matcher.find()) {
                System.out.println(matcher.group());
                System.out.println(getURL(matcher.group()));
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /**
     * Extracts the value of the first {@code href="..."} attribute from an
     * anchor tag. The input is lower-cased first, so the returned URL is
     * lower-case as well.
     *
     * @param content an anchor tag such as {@code <a href="http://x">}
     * @return the href value, or "" when no well-formed href is present
     */
    public static String getURL(String content) {
        String lower = content.toLowerCase();
        int start = lower.indexOf("href=\"");
        if (start == -1) {
            // No href attribute at all; the old `s > 0` check missed this
            // because -1 + 6 is still positive.
            return "";
        }
        start += "href=\"".length();          // skip past href="
        int end = lower.indexOf('"', start);  // closing quote of the value
        if (end == -1) {
            return "";                        // unterminated attribute
        }
        return lower.substring(start, end);
    }

    /**
     * Reads the whole stream with the given charset, mirrors every
     * non-empty line to ../index.html (UTF-8), and returns the text with
     * lines separated by '\n'.
     *
     * @param in      stream to consume; closed on return
     * @param charset charset used to decode the stream; defaults to UTF-8
     *                when null or empty
     * @return the decoded page content
     * @throws IOException on any read or write failure
     */
    public static String pipe(InputStream in, String charset) throws IOException {
        if (charset == null || charset.isEmpty()) {
            charset = "utf-8";
        }
        StringBuilder sb = new StringBuilder();
        // try-with-resources closes both the reader (and thus `in`) and the
        // mirror file even when an exception is thrown mid-read.
        try (BufferedReader reader = new BufferedReader(new InputStreamReader(in, charset));
             PrintWriter pw = new PrintWriter(
                     new OutputStreamWriter(new FileOutputStream("../index.html"), "utf-8"))) {
            String line;
            while ((line = reader.readLine()) != null) {
                if (!line.isEmpty()) {
                    sb.append(line).append('\n');
                    pw.println(line);
                }
            }
        }
        return sb.toString();
    }
}
相关文章推荐
- Python的简单爬虫原理及实现
- Java简单爬虫系列(1)---什么是爬虫,爬虫原理,怎么来实现爬虫
- Java简单的网络爬虫实现
- 单点登录原理与简单实现
- Java实现的网络爬虫程序,简单易懂无框架(我的网络大作业)
- python实现简单爬虫以及正则表达式简述
- 简单实现Spring的IOC原理详解
- 多线程断点续传的原理及其简单实现
- 02. 使用上述01安装库实现最简单的网络爬虫
- 单点登录原理与简单实现
- python实现简单爬虫--爬图片
- 单点登录原理与简单实现
- Spring的MethodBeforeAdvice实现原理探究&&实现一个简单的Advice
- 淘宝HSF服务的原理以及简单的实现
- python在线编译器的简单原理与超简单实现
- 单点登录原理与简单实现
- dubbo使用经验及实现原理简单介绍(转载)
- Skinned Mesh 原理解析和一个最简单的实现示例
- 单点登录原理和简单实现
- java实现的简单网页爬虫:Servlet 搜索引擎核心爬虫程序(三)