I am trying to write a basic depth-first-search based web crawler. Here is my current code:
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.io.*;
import java.net.*;

public class DepthFirstSpider {
    private List<String> visitedList; // web pages already visited
    private static String hrefExpr = "href\\s*=\\s*\"([^\"]+)\"";
    private static Pattern pattern = Pattern.compile(hrefExpr);
    private int limit;
    private static Matcher matcher;
    private static URL contextURL;
    private static URL url;

    public List<String> getVisitedList() { return visitedList; }

    // Initialize the visitedList and limit instance variables. Visit the starting URL.
    public DepthFirstSpider(int limit, String startingURL) {
        visitedList = new ArrayList<String>();
        this.limit = limit;
        try {
            contextURL = new URL(startingURL);
        } catch (MalformedURLException e) {
        }
        visit(startingURL);
    }

    // Print and add urlString to the list of visited web pages.
    // Create the URL and connect, then read through the HTML contents:
    // when an href is encountered, create a new URL relative to the current URL
    // and visit it (if not already visited and the limit has not been reached).
    public void visit(String urlString) {
        try {
            url = new URL(contextURL, urlString);
            URLConnection connection = url.openConnection();
            InputStream inputStream = connection.getInputStream();
            BufferedReader reader = new BufferedReader(
                    new InputStreamReader(inputStream));
            String nextLine;
            while ((nextLine = reader.readLine()) != null) {
                matcher = pattern.matcher(nextLine);
                while (matcher.find() && limit > 0 && !visitedList.contains(url.toString())) {
                    System.out.println("visiting " + url.toString());
                    visitedList.add(url.toString());
                    visit(matcher.group(1));
                    limit--;
                }
            }
        } catch (MalformedURLException e) {
        } catch (IOException e) {
        }
    }
}
The search currently shoots straight down the tree of web pages with no problem. I need help getting it to back up and then move on to the pages it missed. Thanks for the help.
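For reference, here is a rough sketch of one way I imagine visit() could be restructured so the recursion backs up and continues with the remaining links. It assumes the same fields as my class (contextURL, visitedList, pattern, limit), replaces the static url/matcher fields with locals, and uses illustrative local names (pageURL, link, m); I have not verified that this is the right structure:

    // Sketch: mark the page itself as visited, then recurse into every unvisited
    // link found on the page. Local variables keep each level of recursion
    // independent, which is what lets the search back up and keep going.
    public void visit(String urlString) {
        try {
            URL pageURL = new URL(contextURL, urlString);    // resolve relative to the context URL
            if (limit <= 0 || visitedList.contains(pageURL.toString())) {
                return;                                      // out of budget or already seen
            }
            System.out.println("visiting " + pageURL);
            visitedList.add(pageURL.toString());
            limit--;

            BufferedReader reader = new BufferedReader(
                    new InputStreamReader(pageURL.openConnection().getInputStream()));
            String nextLine;
            while ((nextLine = reader.readLine()) != null) {
                Matcher m = pattern.matcher(nextLine);       // local matcher, not the static field
                while (m.find()) {
                    URL link = new URL(pageURL, m.group(1)); // resolve the href against the current page
                    if (limit > 0 && !visitedList.contains(link.toString())) {
                        visit(link.toString());              // recurse; returning here is the "back up" step
                    }
                }
            }
            reader.close();
        } catch (IOException e) {
            // MalformedURLException is an IOException; a bad link shouldn't stop the rest of the crawl
        }
    }

The idea is that each matched href is resolved against the current page and checked against visitedList before the recursive call, so when one branch is exhausted the inner loop simply continues with the next link instead of stopping. Is this the right direction, or is there a better way to structure the backtracking?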