久违的摸鱼时间,看了看B站up主楠哥的内容。
第一次接触爬虫,总的来说是两步走战略:一是获取网页中图片的 URL,二是根据 URL 将图片下载到本地。
本次的需求是从 https://mp.weixin.qq.com/s?__biz=Mzg2NTAzMTExNg==&mid=2247483699&idx=1&sn=2dd9aeee040d24ad13fe4f2ce5104b05&scene=19#wechat_redirect 下载页面的所有图片到本地。
/**
 * Downloads every image from a fixed WeChat article page to a local directory.
 * Step 1: fetch the page with Jsoup and locate the article content element.
 * Step 2: for each lazy-loaded &lt;img&gt; (WeChat stores the real URL in
 * "data-src"), stream the bytes to a numbered .jpg file.
 *
 * @param args unused
 * @throws Exception on network or file I/O failure
 */
public static void main(String[] args) throws Exception {
    // Step 1: fetch and parse the page (10s timeout).
    String url = "https://mp.weixin.qq.com/s?__biz=Mzg2NTAzMTExNg==&mid=2247483699&idx=1&sn=2dd9aeee040d24ad13fe4f2ce5104b05&scene=19#wechat_redirect";
    Document document = Jsoup.parse(new URL(url), 10000);
    // Element IDs are unique per document, so look up the innermost container
    // directly; the original chained getElementById calls would throw an NPE
    // if any intermediate wrapper were missing.
    Element content = document.getElementById("page-content");
    if (content == null) {
        System.out.println("page-content not found; page structure may have changed");
        return;
    }
    Elements images = content.getElementsByTag("img");
    int id = 0;
    // Step 2: download each image.
    for (Element img : images) {
        // WeChat lazy-loads images: the real URL lives in data-src, not src.
        String src = img.attr("data-src");
        if ("".equals(src)) {
            continue; // placeholder <img> with no real source
        }
        URL target = new URL(src);
        System.out.println(target);
        id++;
        // try-with-resources closes both streams even if the copy throws,
        // fixing the leak in the original happy-path-only close() calls.
        try (InputStream in = target.openConnection().getInputStream();
             OutputStream out = new FileOutputStream("/Users/hushuli/Downloads/imgs/" + id + ".jpg")) {
            // Buffered copy: the original read()/write(int) loop made one
            // call per byte.
            byte[] buffer = new byte[8192];
            int n;
            while ((n = in.read(buffer)) != -1) {
                out.write(buffer, 0, n);
            }
        }
        System.out.println(id + "下载完毕");
    }
}