如何使用类名从网站中提取特定的脚本文件

时间:2016-08-31 08:33:55

标签: java jsoup

我有一组特定的网页,我想检查是否在这些网页中使用了特定的网址。由于有37000个网页数量并且手动搜索网址非常耗时。我有一个脚本会从网页上抓取网址。我想在网页中搜索“adsbygoogle”关键字,但我不知道如何使用此关键字从网页搜索网址。这是我到现在为止所尝试的。

import org.jsoup.Jsoup;
import org.jsoup.helper.Validate;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

import java.io.IOException;

/**
 * Example program to list links from a URL.
 */
/**
 * Example program to list links from a URL.
 */
public class ListLinks {
    public static void main(String[] args) throws IOException {

        String url = "url1,url2,..";
        print("Fetching %s...", url);

        // Fetch and parse the page, then gather each group of elements of interest.
        Document doc = Jsoup.connect(url).timeout(10000).get();
        Elements scriptTags = doc.getElementsByTag("script");
        Elements mediaElements = doc.select("[src]");
        Elements importLinks = doc.select("link[href]");
        Elements anchors = doc.select("a[href]");

        // Dump the inline body of every <script> element.
        for (Element scriptTag : scriptTags) {
            System.out.println(scriptTag.data());
        }

        print("\nMedia: (%d)", mediaElements.size());
        for (Element mediaElement : mediaElements) {
            // Images get extra detail (dimensions and alt text); everything else just the URL.
            if (!mediaElement.tagName().equals("img")) {
                print(" * %s: <%s>", mediaElement.tagName(), mediaElement.attr("abs:src"));
            } else {
                print(" * %s: <%s> %sx%s (%s)",
                        mediaElement.tagName(), mediaElement.attr("abs:src"),
                        mediaElement.attr("width"), mediaElement.attr("height"),
                        trim(mediaElement.attr("alt"), 20));
            }
        }

        print("\nImports: (%d)", importLinks.size());
        for (Element importLink : importLinks) {
            print(" * %s <%s> (%s)", importLink.tagName(), importLink.attr("abs:href"), importLink.attr("rel"));
        }

        print("\nLinks: (%d)", anchors.size());
        for (Element anchor : anchors) {
            print(" * a: <%s>  (%s)", anchor.attr("abs:href"), trim(anchor.text(), 35));
        }
    }

    /** printf-style convenience wrapper that writes one formatted line to stdout. */
    private static void print(String msg, Object... args) {
        System.out.println(String.format(msg, args));
    }

    /** Truncates {@code s} to at most {@code width} characters, marking the cut with a trailing '.'. */
    private static String trim(String s, int width) {
        return s.length() > width ? s.substring(0, width - 1) + "." : s;
    }
}

我已经使用过这段代码,但我不知道如何在程序中检测下面这段代码。

<script async src="//pagead2.googlesyndication.com/pagead/js/adsbygoogle.js"></script>
<!-- xxxxx -->
<ins class="adsbygoogle"
     style="display:inline-block;width:xxxpx;height:xxxpx"
     data-ad-client="ca-pub-3778064616989016"
     data-ad-slot="xxxxxx"></ins>
<script>

1 个答案:

答案 0 :(得分:1)

以下示例使用jsoup处理网址列表(pagesToCrawl)。使用doc.select("script")选择所有脚本元素,然后针对searchTerm("adsbygoogle.js")进行解析。这只是一个普通的poc,对于成千上万的URL,可能需要批量处理条目并存储在文件中而不是存储在内存中等。

import org.jsoup.Connection.Response;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

public class ListLinks {

    /**
     * Per-request timeout in milliseconds. The original code set no timeout, so a
     * single stalled server could block the whole crawl indefinitely.
     */
    private static final int TIMEOUT_MILLIS = 10000;

    /**
     * Fetches every page in {@code pagesToCrawl} and reports which of them reference
     * {@code scriptName} inside a {@code <script>} element.
     *
     * <p>Pages that return a non-200 status or throw an {@link IOException} are
     * reported and end up in neither result list.
     *
     * @param pagesToCrawl URLs to fetch and scan
     * @param scriptName   substring to look for in each script element,
     *                     e.g. {@code "adsbygoogle.js"}
     */
    public void scanForScript(ArrayList<String> pagesToCrawl, String scriptName) {

        List<String> pagesWithoutScript = new ArrayList<>();
        List<String> pagesWithScript = new ArrayList<>();

        for (String page : pagesToCrawl) {
            try {
                Response response = Jsoup.connect(page)
                        .userAgent(
                                "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.82 Safari/537.36")
                        .timeout(TIMEOUT_MILLIS) // fix: original request could hang forever
                        .followRedirects(true)
                        .ignoreHttpErrors(true) // non-200 pages are handled below, not thrown
                        .execute();

                if (response.statusCode() != 200) {
                    System.out.println(page + " returned " + response.statusCode()); // handle pages with 404 error
                    continue;
                }

                Document doc = response.parse();

                if (containsScript(doc, scriptName)) { // store filtered pages
                    pagesWithScript.add(page);
                } else {
                    pagesWithoutScript.add(page);
                }
            } catch (IOException e) {
                // Report and keep crawling; one unreachable URL must not abort the batch.
                System.err.println("Failed to fetch " + page + ": " + e);
            }
        }

        printResults(pagesWithScript, pagesWithoutScript);
    }

    /**
     * Returns true if any {@code <script>} element of {@code doc} mentions
     * {@code searchTerm}. {@code toString()} covers both the {@code src} attribute
     * and any inline script body.
     */
    private static boolean containsScript(Document doc, String searchTerm) {
        for (Element scriptElement : doc.select("script")) {
            if (scriptElement.toString().contains(searchTerm)) {
                return true;
            }
        }
        return false;
    }

    /** Prints both result lists with their sizes, matching the original output format. */
    private static void printResults(List<String> pagesWithScript, List<String> pagesWithoutScript) {
        System.out.println("\nNumber of pages containing script: " + pagesWithScript.size());
        for (String page : pagesWithScript) {
            System.out.println(page);
        }
        System.out.println("\n" + "Number of pages not containing script:" + pagesWithoutScript.size());
        for (String page : pagesWithoutScript) {
            System.out.println(page);
        }
    }

    public static void main(String[] args) {

        ArrayList<String> pagesToCrawl = new ArrayList<>();
        pagesToCrawl.add("http://stackoverflow.com/q/39244584/1661938");
        pagesToCrawl.add("http://www.quickonlinetips.com/archives/2013/07/load-google-adsense-script-once/");
        pagesToCrawl.add("http://www.w3schools.com/cssref/css_selectors.asp");
        pagesToCrawl.add("http://www.apnapaisa.com/agent-registration/index.html");

        new ListLinks().scanForScript(pagesToCrawl, "adsbygoogle.js");

    }

}

输出:

http://www.apnapaisa.com/agent-registration/index.html returned 404

Number of pages containing script: 1
http://www.quickonlinetips.com/archives/2013/07/load-google-adsense-script-once/

Number of pages not containing script:2
http://stackoverflow.com/q/39244584/1661938
http://www.w3schools.com/cssref/css_selectors.asp