上次帮公司妹子做了一个爬虫,不是很精致,这次公司项目里要用到,于是又做了一番修改,功能上添加了网址图片采集、下载,以及用线程处理界面网址图片下载等。
说说思路:首先获取初始网址的所有内容,在初始网址采集图片,再去初始网址采集链接,把采集到的链接放入队列,继续采集图片,然后继续采集链接,如此无限循环。
还是上代码!
处理网页内容抓取跟网页网址爬取都做了改进,下面还是请大家来看看代码,有不足之处,还请指出!
网页内容抓取HtmlCodeRequest,
网页网址爬取GetHttpLinks,用正则去筛选html中的Links
图片抓取GetHtmlImageUrlList,用正则去筛选html中的Img
都写进了一个封装类里面 HttpHelper
/// <summary>
/// Fetches the HTML source of the page at <paramref name="Url"/>.
/// Best-effort: any failure (bad URL, timeout, network error) yields "".
/// </summary>
/// <param name="Url">Absolute URL of the page to download.</param>
/// <returns>The page HTML decoded as UTF-8, or an empty string on error.</returns>
public static string HtmlCodeRequest(string Url)
{
    if (string.IsNullOrEmpty(Url))
    {
        return "";
    }
    try
    {
        // Create the request.
        HttpWebRequest httprequst = (HttpWebRequest)WebRequest.Create(Url);
        // Keep the connection alive so repeated requests to the same host reuse it.
        httprequst.KeepAlive = true;
        httprequst.Method = "GET";
        // FIX: the UserAgent property is the header VALUE only — the original
        // string wrongly embedded the "User-Agent:" header name (and lacked the
        // closing parenthesis), producing a malformed header.
        httprequst.UserAgent = "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.2; .NET CLR 1.0.3705)";
        httprequst.Accept = "*/*";
        httprequst.Headers.Add("Accept-Language", "zh-cn,en-us;q=0.5");
        httprequst.ServicePoint.Expect100Continue = false;
        httprequst.Timeout = 5000;
        // Follow 302 redirects automatically.
        httprequst.AllowAutoRedirect = true;
        ServicePointManager.DefaultConnectionLimit = 30;
        // FIX: the response was never disposed in the original — wrap response,
        // stream and reader in using blocks so they are released even when
        // ReadToEnd throws. Abort() becomes unnecessary.
        // NOTE(review): the encoding is hard-coded to UTF-8 and ignores the
        // response's Content-Type charset — confirm all crawled sites are UTF-8.
        using (HttpWebResponse webRes = (HttpWebResponse)httprequst.GetResponse())
        using (System.IO.Stream stream = webRes.GetResponseStream())
        using (System.IO.StreamReader reader = new StreamReader(stream, System.Text.Encoding.UTF8))
        {
            return reader.ReadToEnd();
        }
    }
    catch (Exception)
    {
        // Deliberate best-effort swallow: callers treat "" as "page unavailable".
        return "";
    }
}
/// <summary>
/// Extracts all image URLs (the src attribute of every &lt;img&gt; tag) from
/// the page at <paramref name="url"/>.
/// </summary>
/// <param name="url">URL of the page to scan.</param>
/// <returns>The image URLs found; empty list when the page cannot be fetched.</returns>
public static List<string> GetHtmlImageUrlList(string url)
{
    string html = HttpHelper.HtmlCodeRequest(url);
    if (string.IsNullOrEmpty(html))
    {
        return new List<string>();
    }
    // FIX: the pattern had been destroyed by HTML escaping — "<img" and the
    // named group "(?<imgUrl>...)" were stripped, yet the loop below reads
    // Groups["imgUrl"]. Restored the canonical img/src pattern.
    Regex regImg = new Regex(
        @"<img\b[^<>]*?\bsrc[\s\t\r\n]*=[\s\t\r\n]*[""']?[\s\t\r\n]*(?<imgUrl>[^\s\t\r\n""'<>]*)[^<>]*?/?[\s\t\r\n]*>",
        RegexOptions.IgnoreCase);
    MatchCollection matches = regImg.Matches(html);
    // FIX: restored the stripped <string> type arguments on List.
    List<string> sUrlList = new List<string>();
    foreach (Match match in matches)
    {
        sUrlList.Add(match.Groups["imgUrl"].Value);
    }
    return sUrlList;
}
/// <summary>
/// Extracts page links from the page at <paramref name="url"/> in two passes:
/// (1) absolute http(s) URLs found anywhere in the HTML, then (2) href targets
/// of &lt;a&gt; tags (relative hrefs are resolved against Global.WebUrl).
/// Duplicates are skipped.
/// </summary>
/// <param name="url">URL of the page to scan.</param>
/// <returns>The links found; empty list when the page cannot be fetched.</returns>
public static List<string> GetHttpLinks(string url)
{
    // Fetch the page content.
    string html = HttpHelper.HtmlCodeRequest(url);
    if (string.IsNullOrEmpty(html))
    {
        return new List<string>();
    }
    // FIX: restored the stripped <string> type arguments on List.
    List<string> links = new List<string>();
    // Pass 1: absolute http/https URLs appearing anywhere in the page.
    const string pattern2 = @"http(s)?://([\w-]+\.)+[\w-]+(/[\w- ./?%&=]*)?";
    Regex r2 = new Regex(pattern2, RegexOptions.IgnoreCase);
    MatchCollection m2 = r2.Matches(html);
    foreach (Match url2 in m2)
    {
        // FIX: call .Value once instead of ToString() four times per match.
        string candidate = url2.Value;
        // NOTE(review): skipping when CheckUrlIsLegal returns true looks
        // inverted ("legal" URLs are discarded) — confirm intended semantics.
        if (StringHelper.CheckUrlIsLegal(candidate) || !StringHelper.IsPureUrl(candidate) || links.Contains(candidate))
        {
            continue;
        }
        links.Add(candidate);
    }
    // Pass 2: href attributes of <a> tags, excluding javascript: links and
    // WebForms __doPostBack pseudo-links.
    // FIX: the pattern had been destroyed by HTML escaping — "<a" and the
    // named group "(?<url>...)" were stripped, yet the loop reads Groups["url"].
    const string pattern = @"(?i)<a\s[^>]*?href=(['""]?)(?!javascript|__doPostBack)(?<url>[^'""\s*#<>]+)[^>]*>";
    Regex r = new Regex(pattern, RegexOptions.IgnoreCase);
    MatchCollection m = r.Matches(html);
    foreach (Match url1 in m)
    {
        string href1 = url1.Groups["url"].Value;
        // Resolve relative links against the crawl's root URL.
        if (!href1.Contains("http"))
        {
            href1 = Global.WebUrl + href1;
        }
        if (!StringHelper.IsPureUrl(href1) || links.Contains(href1))
        {
            continue;
        }
        links.Add(href1);
    }
    return links;
}
这边下载图片有个任务条数限制,上限是200条。如果超过的话,线程等待5秒;这里下载图片用的是异步调用的委托。
/// <summary>
/// Downloads the image at <paramref name="url"/> into Global.FloderUrl,
/// naming it with a fresh GUID plus the extension taken from the URL.
/// </summary>
/// <param name="url">Image URL; relative URLs are resolved against Global.WebUrl.</param>
/// <returns>The saved file name on success, or an error message starting with "错误".</returns>
public string DownLoadimg(string url)
{
    // Guard clause keeps the original behavior: empty input never hits the network.
    if (string.IsNullOrEmpty(url))
    {
        return "错误:地址为空";
    }
    try
    {
        // Resolve relative image paths against the crawl's root URL.
        if (!url.Contains("http"))
        {
            url = Global.WebUrl + url;
        }
        HttpWebRequest request = (HttpWebRequest)WebRequest.Create(url);
        request.Timeout = 2000;
        // FIX: the UserAgent property is the header VALUE only — the original
        // string wrongly embedded the "User-Agent:" header name.
        request.UserAgent = "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.2; .NET CLR 1.0.3705)";
        // Follow 302 redirects automatically.
        request.AllowAutoRedirect = true;
        // Random file name avoids collisions; extension comes from the URL.
        string aFirstName = Guid.NewGuid().ToString();
        int dot = url.LastIndexOf('.');
        // FIX: guard the no-dot case — the original produced a garbage
        // extension there. Defaults to "jpg" as a best effort.
        // NOTE(review): URLs with query strings still yield odd extensions
        // (e.g. "php?id=3") — consider trimming at '?' if that matters.
        string aLastName = dot >= 0 ? url.Substring(dot + 1) : "jpg";
        // FIX: the original only Close()d the streams on the success path,
        // leaking the response/file handle whenever Read/Write threw.
        // using blocks release everything on every path.
        using (WebResponse response = request.GetResponse())
        using (Stream reader = response.GetResponseStream())
        using (FileStream writer = new FileStream(Global.FloderUrl + aFirstName + "." + aLastName, FileMode.OpenOrCreate, FileAccess.Write))
        {
            byte[] buff = new byte[512];
            int c; // bytes actually read this iteration
            while ((c = reader.Read(buff, 0, buff.Length)) > 0)
            {
                writer.Write(buff, 0, c);
            }
        }
        return aFirstName + "." + aLastName;
    }
    catch (Exception)
    {
        // Best-effort: caller treats any "错误:" prefix as a failed download.
        return "错误:地址" + url;
    }
}
话不多说,更多的需要大家自己去改进咯!欢迎读者来与楼主进行交流。如果本文对您有参考价值,欢迎帮博主点下文章下方的推荐,谢谢
下面源码送上:嘿嘿要分的哦!
http://download.csdn.net/detail/nightmareyan/9627215