file_get_contents from multiple URLs


Problem description


I want to save the page content from multiple URLs to files.

To start, I have the site URL in an array:

$site = array(
    'url'      => 'http://onesite.com/index.php?c='.$row['code0'].'&o='.$row['code1'].'&y='.$row['code2'].'&a='.$row['cod3'].'&sid=',
    'selector' => 'table.tabel tr'
);
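
Since several pages need to be fetched, it may help to first collect all of the URLs into a plain array. A minimal sketch, assuming a hypothetical $rows result set where each $row carries the same code0/code1/code2/cod3 fields as above:

// Hypothetical: $rows is a list of rows shaped like $row above.
$urls = array();
foreach ($rows as $row) {
    $urls[] = 'http://onesite.com/index.php'
            . '?c=' . $row['code0']
            . '&o=' . $row['code1']
            . '&y=' . $row['code2']
            . '&a=' . $row['cod3']
            . '&sid=';
}

This is also the shape of the $urls array that the cURL multi script in the solution below starts from.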

For saving the files I have tried:

foreach ($site as $n) {
    $referer = 'reffername';

    $header[] = "Accept: text/xml,application/xml,application/json,application/xhtml+xml,text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5";
    $header[] = "Cache-Control: max-age=0";
    $header[] = "Connection: keep-alive";
    $header[] = "Keep-Alive: 300";
    $header[] = "Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.7";
    $header[] = "Accept-Language: en-us,en;q=0.5";

    // "\r\n" must be double-quoted so the escapes become real CR/LF characters
    $opts = array('http' => array(
        'method'     => "GET",
        'header'     => implode("\r\n", $header)."\r\n"."Referer: $referer\r\n",
        'user_agent' => "Mozilla/5.0 (X11; U; Linux i686; pl-PL; rv:1.9.0.2) Gecko/2008092313 Ubuntu/9.25 (jaunty) Firefox/3.8"
    ));
    $context = stream_context_create($opts);

    $data = file_get_contents($site["url"], false, $context);

    $file = md5('$id'); // note: single quotes hash the literal string '$id', so every iteration writes to the same file

    file_put_contents($file, $data);
    $content = unserialize(file_get_contents($file));
}
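
For comparison, a minimal sequential sketch of the same idea, assuming the URLs are collected into a hypothetical $urls array as above and reusing the $header and $referer values from the attempt; the stream context does not depend on the URL, so it can be built once outside the loop:

// Build the context once; it is identical for every request.
$opts = array('http' => array(
    'method'     => 'GET',
    'header'     => implode("\r\n", $header) . "\r\nReferer: $referer\r\n",
    'user_agent' => 'paste your user agent string here'
));
$context = stream_context_create($opts);

foreach ($urls as $url) {
    $data = file_get_contents($url, false, $context);
    if ($data === false) {
        continue; // fetch failed, skip this URL
    }
    // One file per URL, named after the hash of the URL itself.
    file_put_contents(md5($url), $data);
}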

Solution

Basic cURL multi script:

// Your URL array that holds links to the files
$urls = array(); 

// cURL multi-handle
$mh = curl_multi_init();

// This will hold the cURL request for each file
$requests = array();

$options = array(
    CURLOPT_FOLLOWLOCATION => true,
    CURLOPT_AUTOREFERER    => true, 
    CURLOPT_USERAGENT      => 'paste your user agent string here',
    CURLOPT_HEADER         => false,
    CURLOPT_SSL_VERIFYPEER => false,
    CURLOPT_RETURNTRANSFER => true
);

//Corresponding filestream array for each file
$fstreams = array();

$folder = 'content/';
if (!file_exists($folder)){ mkdir($folder, 0777, true); }

foreach ($urls as $key => $url)
{
    // Add initialized cURL object to array
    $requests[$key] = curl_init($url);

    // Set cURL object options
    curl_setopt_array($requests[$key], $options);

    // Extract filename from URL and create appropriate local path
    $path     = parse_url($url, PHP_URL_PATH);
    $filename = pathinfo($path, PATHINFO_FILENAME).'-'.$key; // Or whatever you want
    $filepath = $folder.$filename;

    // Open a filestream for each file and assign it to corresponding cURL object
    $fstreams[$key] = fopen($filepath, 'w');
    curl_setopt($requests[$key], CURLOPT_FILE, $fstreams[$key]);

    // Add cURL object to multi-handle
    curl_multi_add_handle($mh, $requests[$key]);
}

// Loop until all requests have completed
do {
   curl_multi_exec($mh, $active);
} while ($active > 0);

// Collect all data here and clean up
foreach ($requests as $key => $request) {

    //$returned[$key] = curl_multi_getcontent($request); // Use this if you're not downloading into a file; also remove the CURLOPT_FILE option and the fstreams array
    curl_multi_remove_handle($mh, $request); //assuming we're being responsible about our resource management
    curl_close($request);                    //being responsible again.  THIS MUST GO AFTER curl_multi_getcontent();
    fclose($fstreams[$key]);
}

curl_multi_close($mh);
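
One possible refinement, not part of the original answer: the do/while loop above spins the CPU while the transfers are still running. A sketch of a gentler wait loop using curl_multi_select(), which blocks until there is activity on one of the handles:

// Drive the transfers, waiting for socket activity instead of busy-looping.
do {
    $status = curl_multi_exec($mh, $active);
    if ($active) {
        // Block for up to one second until any transfer has something to do.
        curl_multi_select($mh, 1.0);
    }
} while ($active && $status == CURLM_OK);

The rest of the script (adding the handles, the clean-up foreach, curl_multi_close()) stays exactly as shown above.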
