
A Super Simple Web Crawler

A super simple web crawler program; you can build on it to write a more powerful crawler!

Thanks to the blog friend who provided the program!

/**
 * @author Jack.Wang
 *
 */

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.URL;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Search web crawler
public class SearchCrawler implements Runnable {

    /*
     * disallowListCache caches the URLs that robots disallow crawlers to
     * visit. The robots protocol places a robots.txt file in a web site's
     * root directory, declaring which pages on the site are off-limits to
     * crawlers. A crawler should skip those areas during the search.
     * An example robots.txt:
     *
     * # robots.txt for http://somehost.com/
     * User-agent: *
     * Disallow: /cgi-bin/
     * Disallow: /registration # Disallow robots on registration page
     * Disallow: /login
     */
    private HashMap<String, ArrayList<String>> disallowListCache = new HashMap<String, ArrayList<String>>();

    ArrayList<String> errorList = new ArrayList<String>(); // error messages
    ArrayList<String> result = new ArrayList<String>(); // search results
    String startUrl; // starting point of the search
    int maxUrl; // maximum number of URLs to process
    String searchString; // the string to search for (English)
    boolean caseSensitive = false; // whether matching is case sensitive
    boolean limitHost = false; // whether to stay on the starting host

    public SearchCrawler(String startUrl, int maxUrl, String searchString) {
        this.startUrl = startUrl;
        this.maxUrl = maxUrl;
        this.searchString = searchString;
    }

    public ArrayList<String> getResult() {
        return result;
    }

    public void run() { // start the search thread
        crawl(startUrl, maxUrl, searchString, limitHost, caseSensitive);
    }

    // Verify the URL format.
    private URL verifyUrl(String url) {
        // Only handle HTTP URLs.
        if (!url.toLowerCase().startsWith("http://"))
            return null;

        URL verifiedUrl = null;
        try {
            verifiedUrl = new URL(url);
        } catch (Exception e) {
            // malformed URL; fall through and return null
        }
        return verifiedUrl;
    }

    // Check whether robots are allowed to access the given URL.
    private boolean isRobotAllowed(URL urlToCheck) {
        String host = urlToCheck.getHost().toLowerCase(); // host of the given URL
        // System.out.println("host=" + host);

        // Fetch the cached list of URLs this host disallows.
        ArrayList<String> disallowList = disallowListCache.get(host);

        // If not cached yet, download and cache it.
        if (disallowList == null) {
            disallowList = new ArrayList<String>();
            try {
                URL robotsFileUrl = new URL("http://" + host + "/robots.txt");
                BufferedReader reader = new BufferedReader(
                        new InputStreamReader(robotsFileUrl.openStream()));

                // Read the robots file and build the list of disallowed paths.
                String line;
                while ((line = reader.readLine()) != null) {
                    if (line.indexOf("Disallow:") == 0) { // starts with "Disallow:"?
                        String disallowPath = line.substring("Disallow:"
                                .length()); // the disallowed path

                        // Check for a trailing comment.
                        int commentIndex = disallowPath.indexOf("#");
                        if (commentIndex != -1) {
                            disallowPath = disallowPath.substring(0,
                                    commentIndex); // strip the comment
                        }

                        disallowPath = disallowPath.trim();
                        disallowList.add(disallowPath);
                    }
                }

                // Cache this host's disallowed paths.
                disallowListCache.put(host, disallowList);
            } catch (Exception e) {
                return true; // no robots.txt in the site root, so allow access
            }
        }

        String file = urlToCheck.getFile();
        // System.out.println("file getFile()=" + file);
        for (int i = 0; i < disallowList.size(); i++) {
            String disallow = disallowList.get(i);
            if (file.startsWith(disallow)) {
                return false;
            }
        }
        return true;
    }

    // Download the page at the given URL; return null on failure.
    private String downloadPage(URL pageUrl) {
        try {
            // Open connection to URL for reading.
            BufferedReader reader = new BufferedReader(new InputStreamReader(
                    pageUrl.openStream()));

            // Read page into buffer.
            String line;
            StringBuffer pageBuffer = new StringBuffer();
            while ((line = reader.readLine()) != null) {
                pageBuffer.append(line);
            }
            return pageBuffer.toString();
        } catch (Exception e) {
            // download failed; fall through and return null
        }
        return null;
    }

    // Remove "www" from a URL.
    private String removeWwwFromUrl(String url) {
        int index = url.indexOf("://www.");
        if (index != -1) {
            return url.substring(0, index + 3) + url.substring(index + 7);
        }
        return (url);
    }

    // Parse the page and find its outbound links.
    private ArrayList<String> retrieveLinks(URL pageUrl, String pageContents,
            HashSet crawledList, boolean limitHost) {
        // Compile the link-matching pattern as a regular expression.
        Pattern p = Pattern.compile("<a\\s+href\\s*=\\s*\"?(.*?)[\"|>]",
                Pattern.CASE_INSENSITIVE);
        Matcher m = p.matcher(pageContents);

        ArrayList<String> linkList = new ArrayList<String>();
        while (m.find()) {
            String link = m.group(1).trim();

            if (link.length() < 1) {
                continue;
            }

            // Skip links that point back into this page.
            if (link.charAt(0) == '#') {
                continue;
            }
            if (link.indexOf("mailto:") != -1) {
                continue;
            }
            if (link.toLowerCase().indexOf("javascript") != -1) {
                continue;
            }

            if (link.indexOf("://") == -1) {
                if (link.charAt(0) == '/') { // handle absolute paths
                    link = "http://" + pageUrl.getHost() + ":"
                            + pageUrl.getPort() + link;
                } else { // handle relative paths
                    String file = pageUrl.getFile();
                    if (file.indexOf('/') == -1) {
                        link = "http://" + pageUrl.getHost() + ":"
                                + pageUrl.getPort() + "/" + link;
                    } else {
                        String path = file.substring(0,
                                file.lastIndexOf('/') + 1);
                        link = "http://" + pageUrl.getHost() + ":"
                                + pageUrl.getPort() + path + link;
                    }
                }
            }

            // Drop any fragment identifier.
            int index = link.indexOf('#');
            if (index != -1) {
                link = link.substring(0, index);
            }

            link = removeWwwFromUrl(link);
            URL verifiedLink = verifyUrl(link);
            if (verifiedLink == null) {
                continue;
            }

            /* If limited to one host, exclude URLs on other hosts. */
            if (limitHost
                    && !pageUrl.getHost().toLowerCase().equals(
                            verifiedLink.getHost().toLowerCase())) {
                continue;
            }

            // Skip links that have already been processed.
            if (crawledList.contains(link)) {
                continue;
            }

            linkList.add(link);
        }
        return (linkList);
    }

    // Search the downloaded page contents for the given search string.
    private boolean searchStringMatches(String pageContents,
            String searchString, boolean caseSensitive) {
        String searchContents = pageContents;
        if (!caseSensitive) { // if case-insensitive, lower-case the page
            searchContents = pageContents.toLowerCase();
        }

        // Split the search string into whitespace-separated terms;
        // every term must appear in the page.
        Pattern p = Pattern.compile("[\\s]+");
        String[] terms = p.split(searchString);
        for (int i = 0; i < terms.length; i++) {
            if (caseSensitive) {
                if (searchContents.indexOf(terms[i]) == -1) {
                    return false;
                }
            } else {
                if (searchContents.indexOf(terms[i].toLowerCase()) == -1) {
                    return false;
                }
            }
        }
        return true;
    }

    // Perform the actual crawl.
    public ArrayList<String> crawl(String startUrl, int maxUrls,
            String searchString, boolean limithost, boolean caseSensitive) {
        HashSet<String> crawledList = new HashSet<String>();
        LinkedHashSet<String> toCrawlList = new LinkedHashSet<String>();

        if (maxUrls < 1) {
            errorList.add("Invalid Max URLs value.");
            System.out.println("Invalid Max URLs value.");
        }
        if (searchString.length() < 1) {
            errorList.add("Missing Search String.");
            System.out.println("Missing search String");
        }
        if (errorList.size() > 0) {
            System.out.println("err!!!");
            return errorList;
        }

        // Remove "www" from the start URL.
        startUrl = removeWwwFromUrl(startUrl);
        toCrawlList.add(startUrl);

        while (toCrawlList.size() > 0) {
            if (maxUrls != -1) {
                if (crawledList.size() == maxUrls) {
                    break;
                }
            }

            // Get URL at bottom of the list.
            String url = toCrawlList.iterator().next();

            // Remove URL from the to-crawl list.
            toCrawlList.remove(url);

            // Convert string url to URL object; skip it if malformed.
            URL verifiedUrl = verifyUrl(url);
            if (verifiedUrl == null) {
                continue;
            }

            // Skip URL if robots are not allowed to access it.
            if (!isRobotAllowed(verifiedUrl)) {
                continue;
            }

            // Add the processed URL to crawledList.
            crawledList.add(url);
            String pageContents = downloadPage(verifiedUrl);

            if (pageContents != null && pageContents.length() > 0) {
                // Extract the valid links from the page.
                ArrayList<String> links = retrieveLinks(verifiedUrl,
                        pageContents, crawledList, limitHost);
                toCrawlList.addAll(links);

                if (searchStringMatches(pageContents, searchString,
                        caseSensitive)) {
                    result.add(url);
                    System.out.println(url);
                }
            }
        }
        return result;
    }

    // Main entry point.
    public static void main(String[] args) {
        SearchCrawler crawler = new SearchCrawler(
                "http://www.blogjava.net/Jack2007/", 20, "jack");
        Thread search = new Thread(crawler);
        System.out.println("Start searching...");
        System.out.println("result:");
        search.start();
        try {
            search.join();
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
    }
}
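The introduction suggests building a stronger crawler on top of this one. As one example of hardening, the sketch below is my own addition, not part of the original program: the PageFetcher class and fetchPage name are hypothetical. It replaces the plain URL.openStream() call used in downloadPage() with an HttpURLConnection, so the crawler can send a User-Agent header and enforce timeouts, keeping slow or unfriendly hosts from hanging the crawl thread.

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

// Hypothetical drop-in alternative to downloadPage(): same contract
// (page text on success, null on failure), but with a User-Agent and
// timeouts so a single slow host cannot stall the whole crawl.
public class PageFetcher {
    public static String fetchPage(URL pageUrl) {
        try {
            HttpURLConnection conn = (HttpURLConnection) pageUrl.openConnection();
            conn.setRequestProperty("User-Agent", "SimpleSearchCrawler/0.1"); // identify the bot
            conn.setConnectTimeout(5000); // 5 s to establish the connection
            conn.setReadTimeout(10000); // 10 s max wait for data
            BufferedReader reader = new BufferedReader(
                    new InputStreamReader(conn.getInputStream()));
            StringBuilder pageBuffer = new StringBuilder();
            String line;
            while ((line = reader.readLine()) != null) {
                pageBuffer.append(line);
            }
            reader.close();
            return pageBuffer.toString();
        } catch (Exception e) {
            return null; // treat any network error as "no page"
        }
    }
}

Note also that after search.join() returns in main, the matched URLs can be collected with crawler.getResult() rather than relying on console output.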
