How to crawl multiple URLs with jsoup

Date: 2016-03-11 11:54:32

Tags: java web-crawler jsoup

I have the code below, which crawls a website with jsoup, but I want to crawl several URLs at the same time. I store the URLs in an array, but I can't get it to work. How can I implement this code with multithreading, and is multithreading suitable for this kind of application?

import java.io.IOException;

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

public class Webcrawler {
    public static void main(String[] args) throws IOException {

        String [] url = {"http://www.dmoz.org/","https://docs.oracle.com/en/"}; 
        //String [] url = new String[3];
        //url[0] = "http://www.dmoz.org/";
        //url[1] = "http://www.dmoz.org/Computers/Computer_Science/";
        //url[2] = "https://docs.oracle.com/en/";

        for(String urls : url){
            System.out.print("Sites to be crawled\n " + urls);
        }
        //String url = "http://www.dmoz.org/";
        print("\nFetching %s...", url);

        Document doc = Jsoup.connect(url[0]).get();
        Elements links = doc.select("a");
        //doc.select("a[href*=https]"); // (this is the one you are looking for) selects links whose href value contains "https"
        print("\nLinks: (%d)", links.size());
        for (Element link : links) {
            print(" (%s)", link.absUrl("href") /*link.attr("href")*/, trim(link.text(), 35));     
        }
    }

    private static void print(String msg, Object... args) {
        System.out.println(String.format(msg, args));
    }

    private static String trim(String s, int width) {
        if (s.length() > width)
            return s.substring(0, width-1) + ".";
        else
            return s;
    }
}

1 Answer:

Answer 0 (score: 2)

You can use multithreading and crawl multiple sites at the same time. The following code does what you need. I'm quite sure it can be improved a lot (for example, by using an Executor; a sketch of that idea follows the code below), but I wrote it quickly.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.select.Elements;

public class Main {

    public static void main(String[] args) {

        String[] urls = new String[]{"http://www.dmoz.org/", "http://www.dmoz.org/Computers/Computer_Science/", "https://docs.oracle.com/en/"};

        // Create and start workers
        List<Worker> workers = new ArrayList<>(urls.length);
        for (String url : urls) {
            Worker w = new Worker(url);
            workers.add(w);
            new Thread(w).start();
        }

        // Retrieve results
        for (Worker w : workers) {
            Elements results = w.waitForResults();
            if (results != null)
                System.out.println(w.getName()+": "+results.size());
            else
                System.err.println(w.getName()+" had some error!");
        }
    }
}

class Worker implements Runnable {

    private String url;
    private Elements results;
    private String name;
    private static int number = 0;

    private final Object lock = new Object();
    private boolean failed = false;   // set when fetching this worker's URL fails

    public Worker(String url) {
        this.url = url;
        this.name = "Worker-" + (number++);
    }

    public String getName() {
        return name;
    }

    @Override
    public void run() {
        try {
            Document doc = Jsoup.connect(this.url).get();

            Elements links = doc.select("a");

            // Update results
            synchronized (lock) {
                this.results = links;
                lock.notifyAll();
            }
        } catch (IOException e) {
            // You should implement better error handling here.
            System.err.println("Error while parsing: " + this.url);
            e.printStackTrace();

            // Wake up the waiting thread even on failure, otherwise
            // waitForResults() would block forever.
            synchronized (lock) {
                this.failed = true;
                lock.notifyAll();
            }
        }
    }

    public Elements waitForResults() {
        synchronized (lock) {
            try {
                while (this.results == null && !this.failed) {
                    lock.wait();
                }
                return this.results;
            } catch (InterruptedException e) {
                // Again, better error handling would be advisable here.
                e.printStackTrace();
            }

            return null;
        }
    }
}
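
As mentioned above, the same idea can be expressed more compactly with the executor framework. The sketch below is illustrative (the ExecutorCrawler class name and the fixed pool size are my own choices, not part of the original answer): each URL becomes a Callable that fetches the page and selects its links, and Future.get() replaces the hand-written wait/notify.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

import org.jsoup.Jsoup;
import org.jsoup.select.Elements;

public class ExecutorCrawler {

    public static void main(String[] args) throws InterruptedException {

        String[] urls = {"http://www.dmoz.org/", "https://docs.oracle.com/en/"};

        // A fixed pool bounds the number of concurrent connections.
        ExecutorService pool = Executors.newFixedThreadPool(urls.length);

        // Submit one task per URL; each task fetches the page and selects its links.
        List<Future<Elements>> futures = new ArrayList<>();
        for (String url : urls) {
            Callable<Elements> task = () -> Jsoup.connect(url).get().select("a");
            futures.add(pool.submit(task));
        }

        // Future.get() blocks until the task finishes and rethrows any exception
        // the task threw, so no manual wait/notify is needed.
        for (int i = 0; i < urls.length; i++) {
            try {
                Elements links = futures.get(i).get();
                System.out.println(urls[i] + ": " + links.size() + " links");
            } catch (ExecutionException e) {
                System.err.println("Error while parsing: " + urls[i]);
                e.getCause().printStackTrace();
            }
        }

        pool.shutdown();
    }
}

Here each Future carries either the Elements or the exception thrown by its task, so the per-worker lock and the failure flag from the version above are no longer needed.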