First, I would use curl (it's faster, for two reasons, and by setting a useragent it can pass for a regular browser), and I wouldn't bother with simple_html_dom when PHP's built-in DOMDocument can do the same job.

Also, you don't want to reset $i at 8: there are 10 columns per row, so that would skew your results, whereas resetting at 9 starts a new row as expected. In my example I put all the data into an array, but you'll want to put it into a database etc. As you can see, for the 4 pages the peak memory usage is 1.40 MB.
<?php
$url = 'http://avnetexpress.avnet.com/store/em/EMController/_/N-?Nn=50&Ns=PartNumber|0&action=excess_inventory&catalogId=&cutTape=&inStock=&langId=-1&myCatalog=&npi=&proto=&regionalStock=&rohs=&storeId=500201&term=&topSellers=&No=';
// 4 pages
$result = run_scrap($url, 100, 25);
// Memory usage
$memory = array();
$memory['used'] = getReadableFileSize(memory_get_peak_usage());
$memory['total'] = ini_get("memory_limit").'B';
print_r($result);
print_r($memory); // Array ( [used] => 1.40 MB [total] => 128MB )
/** Result
 * Array
 * (
 *     [0] => Array
 *         (
 *             [title] => Logic and Timing - Crystals
 *             [partnum] => ##BP11DCRK430
 *             [manufacturer] => TOKO America
 *             [price] => $0.3149
 *             [availability] => 4500 Stock
 *         )
 *     [1] => Array
 *         (
 *             [title] => Inductor - Inductor Leaded
 *             [partnum] => #187LY-471J
 *             [manufacturer] => TOKO America
 *             [price] => $0.3149
 *             [availability] => 100 Stock
 *         )
 *     ...
 */
function run_scrap($url, $total_items = 100, $step = 25){
    // Fetch each page offset (0, 25, 50, ...) and merge the parsed rows.
    $range = range(0, $total_items, $step);
    $result = array();
    foreach($range as $page){
        $src = curl_get($url.$page);
        $result = array_merge($result, process($src));
    }
    return $result;
}
function process($src){
    $dom = new DOMDocument("1.0", "UTF-8");
    // preserveWhiteSpace must be set before loadHTML to have any effect.
    $dom->preserveWhiteSpace = false;
    // Suppress the warnings DOMDocument emits on the site's malformed HTML.
    @$dom->loadHTML($src);
    $return = array();
    $i = 0; // column index within the current row (0-9)
    $r = 0; // row index in the result array
    foreach($dom->getElementsByTagName('td') as $ret) {
        if($ret->getAttribute('class') == 'small dataTd'){
            switch($i){
                case 1:
                    $return[$r]['title'] = trim($ret->nodeValue);
                    break;
                case 3:
                    $return[$r]['partnum'] = trim($ret->nodeValue);
                    break;
                case 4:
                    $return[$r]['manufacturer'] = trim($ret->nodeValue);
                    break;
                case 7:
                    $return[$r]['price'] = trim($ret->nodeValue);
                    break;
                case 8:
                    $return[$r]['availability'] = trim($ret->nodeValue);
                    break;
                default:
                    break;
            }
            // Each row has 10 columns, so reset after col 9 to start a new row.
            if($i == 9){
                $i = 0;
                $r++;
            }else{
                $i++;
            }
        }
    }
    return $return;
}
function curl_get($url){
    if(!function_exists('curl_init')){
        die('cURL must be installed!');
    }
    $curl = curl_init();
    // Mimic the headers a regular browser would send.
    $header = array();
    $header[0] = "Accept: text/xml,application/xml,application/json,application/xhtml+xml,";
    $header[0] .= "text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5";
    $header[] = "Cache-Control: max-age=0";
    $header[] = "Connection: keep-alive";
    $header[] = "Keep-Alive: 300";
    $header[] = "Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.7";
    $header[] = "Accept-Language: en-us,en;q=0.5";
    curl_setopt($curl, CURLOPT_URL, $url);
    curl_setopt($curl, CURLOPT_USERAGENT, 'Mozilla/5.0 (X11; U; Linux i686; pl-PL; rv:1.9.0.2) Gecko/2008092313 Ubuntu/9.25 (jaunty) Firefox/3.8');
    curl_setopt($curl, CURLOPT_HTTPHEADER, $header);
    curl_setopt($curl, CURLOPT_HEADER, 0);
    curl_setopt($curl, CURLOPT_RETURNTRANSFER, true);
    curl_setopt($curl, CURLOPT_FOLLOWLOCATION, true);
    curl_setopt($curl, CURLOPT_TIMEOUT, 30);
    curl_setopt($curl, CURLOPT_SSL_VERIFYPEER, false);
    $result = curl_exec($curl);
    curl_close($curl);
    return $result;
}
// Debug function - not related to the scraper
function getReadableFileSize($size, $retstring = null) {
    $sizes = array('bytes', 'kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB');
    if ($retstring === null) { $retstring = '%01.2f %s'; }
    $lastsizestring = end($sizes);
    foreach ($sizes as $sizestring) {
        if ($size < 1024) { break; }
        if ($sizestring != $lastsizestring) { $size /= 1024; }
    }
    if ($sizestring == $sizes[0]) { $retstring = '%01d %s'; } // whole bytes only
    return sprintf($retstring, $size, $sizestring);
}
?>
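
As mentioned above, in a real job you would write the rows to a database instead of keeping them in an array. Here is a minimal sketch of how that might look with PDO and a prepared statement; the DSN, credentials, and the `parts` table schema are placeholder assumptions, not part of the original code:

<?php
// Hypothetical persistence step: DSN, credentials and table name are assumptions.
// Assumed schema:
// CREATE TABLE parts (
//     id INT AUTO_INCREMENT PRIMARY KEY,
//     title VARCHAR(255),
//     partnum VARCHAR(64),
//     manufacturer VARCHAR(128),
//     price VARCHAR(32),
//     availability VARCHAR(64)
// );
$pdo = new PDO('mysql:host=localhost;dbname=scraper;charset=utf8', 'user', 'pass');
$pdo->setAttribute(PDO::ATTR_ERRMODE, PDO::ERRMODE_EXCEPTION);

$stmt = $pdo->prepare(
    'INSERT INTO parts (title, partnum, manufacturer, price, availability)
     VALUES (:title, :partnum, :manufacturer, :price, :availability)'
);

// $result is the array returned by run_scrap() above.
foreach ($result as $row) {
    // A prepared statement keeps scraped content from breaking the query.
    $stmt->execute(array(
        ':title'        => $row['title'],
        ':partnum'      => $row['partnum'],
        ':manufacturer' => $row['manufacturer'],
        ':price'        => $row['price'],
        ':availability' => $row['availability'],
    ));
}
?>

Inserting row by row is fine at this scale (a few hundred items); for much larger scrapes you would batch the inserts inside a transaction.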