Chrome内存问题 - File API + AngularJS [英] Chrome memory issue - File API + AngularJS

查看:244
本文介绍了Chrome内存问题 - File API + AngularJS的处理方法,对大家解决问题具有一定的参考价值,需要的朋友们下面随着小编来一起学习吧!

问题描述

我有一个需要将大文件上传到Azure BLOB存储的网络应用程序。我的解决方案使用HTML5 File API分割成块,然后将它们作为blob块放置,块的ID存储在一个数组中,然后将这些块作为blob提交。

解决方案在IE中运行正常。在64位Chrome上,我已成功上传4Gb文件,但看到非常繁重的内存使用情况(2Gb +)。在32位Chrome上,具体的chrome进程将达到500-550Mb左右,然后崩溃。



我无法看到任何明显的内存泄漏或我可以改变的帮助垃圾收集。我将块ID存储在一个数组中,所以显然会有一些内存蠕变,但这不应该是巨大的。这就好像File API将整个文件保存在内存中一样。



它被写为一个从控制器调用的Angular服务,我认为服务代码是相关:

 (function(){
'use strict';

angular
.module('app.core')
.factory('blobUploadService',
[
'$ http','stringUtilities',
blobUploadService
]);

函数blobUploadService($ http,stringUtilities){

var defaultBlockSize = 1024 * 1024; //默认为1024KB
var stopWatch = {};
var state = {};

var initializeState = function(config){
var blockSize = defaultBlockSize;
if(config.blockSize)blockSize = config.blockSize ;

var maxBlockSize = blockSize;
var numberOfBlocks = 1;

var file = config.file;

var fileSize = f ile.size;
if(fileSize< blockSize){
maxBlockSize = fileSize;
}

if(fileSize%maxBlockSize === 0){
numberOfBlocks = fileSize / maxBlockSize;
} else {
numberOfBlocks = parseInt(fileSize / maxBlockSize,10)+ 1;
}

return {
maxBlockSize:maxBlockSize,
numberOfBlocks:numberOfBlocks,
totalBytesRemaining:fileSize,
currentFilePointer:0,
blockIds:new Array(),
blockIdPrefix:'block-',
bytesUploaded:0,
submitUri:null,
file:file,
baseUrl:config .baseUrl,
sasToken:config.sasToken,
fileUrl:config.baseUrl + config.sasToken,
进度:config.progress,
完成:config.complete,
错误:config.error,
取消:false
};
};
$ b $ * config:{
baseUrl:// baseUrl for blob file uri(ie http://< accountName> .blob.core.windows.net /< container> / < blobname>),
sasToken://共享访问签名querystring键/值,前缀为?,
file://使用HTML5 File API的文件对象
progress://进度回调函数,
完成://完成回调函数,
错误://错误回调函数,
blockSize://使用它来覆盖defaultBlockSize
} * /
var upload = function(config){
state = initializeState(config);

var reader = new FileReader();
reader.onloadend = function(evt){
if(evt.target.readyState === FileReader.DONE&&&!state.cancelled){//完成=== 2
var uri = state.fileUrl +'& comp = block& blockid ='+ state.blockIds [state.blockIds.length - 1];
var requestData = new Uint8Array(evt.target.result);

$ http.put(uri,
requestData,
{
headers:{
'x-ms-blob-type':'BlockBlob' ,
'Content-Type':state.file.type
},
transformRequest:[]
})
.success(function(data,status,headers ,config){
state.bytesUploaded + = requestData.length;

var percentComplete =((parseFloat(state.bytesUploaded)/ parseFloat(state.file.size))* 100
).toFixed(2);
if(state.progress)state.progress(percentComplete,data,status,headers,config);
$ b $ uploadFileInBlocks(reader,state);
})
.error(函数(data,status,headers,config){
if(state.error)state.error(data,status,headers,config);
});
}
};

uploadFileInBlocks(reader,state);

return {
cancel:function(){
state.cancelled = true;
}
};
};

函数cancel(){
stopWatch = {};
state.cancelled = true;
返回true;
}

函数startStopWatch(handle){
if(stopWatch [handle] === undefined){
stopWatch [handle] = {};
stopWatch [handle] .start = Date.now();



function stopStopWatch(handle){
stopWatch [handle] .stop = Date.now();
var duration = stopWatch [handle] .stop - stopWatch [handle] .start;
删除stopWatch [句柄];
回报期;
}

var commitBlockList = function(state){
var uri = state.fileUrl +'& comp = blocklist';

var requestBody ='<?xml version =1.0encoding =utf-8?>< BlockList>';
for(var i = 0; i< state.blockIds.length; i ++){
requestBody + ='< Latest>'+ state.blockIds [i] +'< / Latest> ;
}
requestBody + ='< / BlockList>';

$ http.put(uri,
requestBody,
{
headers:{
'x-ms-blob-content-type':state (数据,状态,头文件,配置文件){
if(state.complete)state.complete(data,status ,header,config);
})
.error(function(data,status,headers,config){
if(state.error)state.error(data,status,headers, config);
//如果发生错误,则异步调用
//或服务器返回错误状态的响应
});
};
$ b $ var uploadFileInBlocks = function(reader,state){
if(!state.cancelled){
if(state.totalBytesRemaining> 0){

var fileContent = state.file.slice(state.currentFilePointer,
state.currentFilePointer + state.maxBlockSize);
var blockId = state.blockIdPrefix + stringUtilities.pad(state.blockIds.length,6);

state.blockIds.push(btoa(blockId));
reader.readAsArrayBuffer(fileContent);

state.currentFilePointer + = state.maxBlockSize;
state.totalBytesRemaining - = state.maxBlockSize;
if(state.totalBytesRemaining< state.maxBlockSize){
state.maxBlockSize = state.totalBytesRemaining;
}
} else {
commitBlockList(state);
}
}
};

返回{
上传:上传,
取消:取消,
startStopWatch:startStopWatch,
stopStopWatch:stopStopWatch
};
};
})();

有什么方法可以移动对象的范围以帮助使用Chrome GC?我已经看到其他人提到类似的问题,但理解Chromium已经解决了一些问题。



我应该说我的解决方案很大程度上基于Gaurav Mantri的博客帖子:



http://gauravmantri.com/2013/02/16/uploading-large-files-in-windows-azure-blob-storage-using-shared-access-signature-html-and-javascript/#comment-47480

解决方案


我看不到任何明显的内存泄漏或我可以更改以帮助垃圾回收的事情。我将块 ID 存储在一个数组中,所以显然会有一些内存蠕变,但这不应该是巨大的。这就好像 File API 将它切片的整个文件保存在内存中一样。

你是对的。由 .slice() 创建的新 Blob 被保留在内存中。

解决方法是在处理完 Blob 或 File 对象后,对相应的 Blob 引用调用 Blob.prototype.close()。



还要注意,问题中的 javascript 在每次调用 upload 函数时都会创建一个新的 FileReader 实例。


4.3.1。切片方法



slice() 方法返回一个新的 Blob 对象,其字节范围从可选的 start 参数开始,到但不包括可选的 end 参数为止,其 type 属性是可选的 contentType 参数的值。

Blob 实例在文档的整个生命周期内都存在。不过,一旦从 Blob URL Store 中移除,Blob 应该会被垃圾回收。


9.6。 Blob URL的生命周期



注意:用户代理可以自由地对已从 Blob URL Store 中移除的资源进行垃圾回收。



每个 Blob 必须有一个内部的 快照状态(snapshot state),如果存在任何底层存储,则其初始值必须设置为底层存储的状态,并且必须通过 StructuredClone 得以保留。快照状态的进一步规范性定义可以在 File 中找到。







4.3.2。关闭方法



close() 方法被称为 关闭(close) 一个 Blob,并且必须按如下方式运行:

  1. 如果上下文对象的 可读性状态 为 CLOSED,终止此算法。
  2. 否则,将上下文对象的 可读性状态 设置为 CLOSED。
  3. 如果上下文对象在 Blob URL Store 中有条目,移除与上下文对象对应的条目。

如果 Blob 对象被传递给了 URL.createObjectURL(),请先对该 Blob 或 File 对象调用 URL.revokeObjectURL(),然后再调用 .close()。


revokeObjectURL(url) 静态方法



通过从 Blob URL Store 中删除相应的条目,撤销字符串 url 中提供的 Blob URL。此方法必须按如下方式执行:

  1. 如果 url 引用的 Blob 的 可读性状态 为 CLOSED,或者为 url 参数提供的值不是 Blob URL,或者为 url 参数提供的值在 Blob URL Store 中没有条目,则此方法调用什么也不做。用户代理可能会在错误控制台上显示一条消息。
  2. 否则,用户代理必须从 Blob URL Store 中删除 url 对应的条目。



您可以通过打开

chrome://blob-internals

查看创建 Blob 和关闭 Blob 的调用前后的详细信息,来验证这些调用的结果。



例如,来自

  xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx 
Refcount:1
Content Type:text / plain
类型:数据
长度:3

  xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx 
Refcount:1
Content Type:text / plain

在调用 .close()之后。同样来自

  blob:http://example.com/c2823f75-de26-46f9-a4e5-95f57b8230bd 
Uuid :29e430a6-f093-40c2-bc70-2b6838a713bc






另一种方法是将文件作为 ArrayBuffer 或数组缓冲区块发送,然后在服务器上重新组装文件。

或者,您可以只调用一次 FileReader 构造函数、FileReader.prototype.readAsArrayBuffer() 以及 FileReader 的 load 事件。

在 FileReader 的 load 事件中,将 ArrayBuffer 传递给 Uint8Array,使用 ReadableStream、TypedArray.prototype.subarray()、.getReader()、.read(),在 pull 时从 Uint8Array 中获取 ArrayBuffer 的 N 个块作为 TypedArray。当处理完数量等于 ArrayBuffer 的 .byteLength 的 N 个块后,将 Uint8Array 的数组传递给 Blob 构造函数,以便在浏览器中将文件各部分重新组合为单个文件;然后将 Blob 发送到服务器。

 <!DOCTYPE html> 
< html>

< head>
< / head>

< body>
< input id =filetype =file>
< br>
< progress value =0>< / progress>
< br>
< output for =file>< img alt =preview>< / output>
< script type =text / javascript>
const [input,output,img,progress,fr,handleError,CHUNK] = [
document.querySelector(input [type ='file'])
,document.querySelector document.querySelector(progress)
,new FileReader
, (err)=> console.log(err)
,1024 * 1024
];

progress.addEventListener(progress,e => {
progress.value = e.detail.value;
e.detail.promise();
});

let [chunks,NEXT,CURR,url,blob] = [Array(),0,0];

input.onchange =()=> {
NEXT = CURR = progress.value = progress.max = chunks.length = 0;
if(url){
URL.revokeObjectURL(url);
if(blob.hasOwnProperty(close)){
blob.close();



if(input.files.length){
console.log(input.files [0]);
progress.max = input.files [0] .size;
progress.step = progress.max / CHUNK;
fr.readAsArrayBuffer(input.files [0]);
}

}

fr.onload =()=> {
const VIEW = new Uint8Array(fr.result);
const LEN = VIEW.byteLength;
const {type,name:filename} = input.files [0];
const stream = new ReadableStream({
pull(controller){
if(NEXT< LEN){
controller
.enqueue(VIEW.subarray(NEXT, !; NEXT?CHUNK:CHUNK + NEXT));
NEXT + = CHUNK;
} else {
controller.close();
}
},
取消(原因){
console.log(原因);
抛出新错误(原因);
}
});

const [reader,processData] = [
stream.getReader()
,({value,done})=> {
if(done){
return reader.closed.then(()=> chunks);
}
chunks.push(value);
return new Promise(resolve => {
progress.dispatchEvent(
new CustomEvent(progress,{
detail:{
value:CURR + = value .byteLength,
promise:resolve
}
})
);
})
.then(()=> reader.read() (data => processData(data)))
.catch(e => reader.cancel(e))
}
]; (data => processData(data))
.then(data => {
blob = new Blob($)数据,{type});
console.log(complete,data,blob);
if(/image/.test(type)){
url = URL.createObjectURL( blob);
img.onload =()=> {
img.title =文件名;
input.value =;
}
img.src = url;
} else {
input.value =;
}
})
.catch(e => handleError(e))

}
< / script>

< / body>

< / html>

plnkr http://plnkr.co/edit/AEZ7iQce4QaJOKut71jk?p=preview






您还可以使用 fetch():

fetch(new Request("/path/to/server/", {method:"PUT", body:blob}))




要为请求 request 传输 正文(body),请运行以下步骤:

  1. 令 body 为 request 的 body。
  2. 如果 body 为 null,则在 request 上排队一个 fetch 任务来处理 request 的请求体结束(end-of-body),并放弃这些步骤。
  3. 令 read 为从 body 的流中读取一个块的结果。

    • 当 read 以一个 done 属性为 false 且 value 属性为 Uint8Array 对象的对象兑现时,运行以下子步骤:

      1. 令 bytes 为该 Uint8Array 对象所表示的字节序列。
      2. 传输 bytes。
      3. 将 body 的已传输字节数增加 bytes 的长度。
      4. 再次运行上述步骤。

    • 当 read 以一个 done 属性为 true 的对象兑现时,在 request 上排队一个 fetch 任务来处理 request 的请求体结束。

    • 当 read 以与上述两种模式都不匹配的值兑现,或 read 被拒绝时,以 致命(fatal) 原因终止正在进行的 fetch。

另请参阅


I have a web app that needs to upload large files to Azure BLOB storage. My solution uses HTML5 File API to slice into chunks which are then put as blob blocks, the IDs of the blocks are stored in an array and then the blocks are committed as a blob.

The solution works fine in IE. On 64 bit Chrome I have successfully uploaded 4Gb files but see very heavy memory usage (2Gb+). On 32 bit Chrome the specific chrome process will get to around 500-550Mb and then crash.

I can't see any obvious memory leaks or things I can change to help garbage collection. I store the block IDs in an array so obviously there will be some memory creeep but this shouldn't be massive. It's almost as if the File API is holding the whole file it slices into memory.

It's written as an Angular service called from a controller, I think just the service code is pertinent:

(function() {
    'use strict';

    angular
    .module('app.core')
    .factory('blobUploadService',
    [
        '$http', 'stringUtilities',
        blobUploadService
    ]);

function blobUploadService($http, stringUtilities) {

    var defaultBlockSize = 1024 * 1024; // Default to 1024KB
    var stopWatch = {};
    var state = {};

    /**
     * Creates a fresh upload-state object from the supplied config.
     * @param {Object} config - { file, baseUrl, sasToken, progress,
     *     complete, error, blockSize? } (see the config comment below).
     * @returns {Object} state: block sizing, byte counters, block-id
     *     accumulator and the caller's callbacks.
     */
    var initializeState = function(config) {
        // Caller may override the default 1024KB block size.
        var blockSize = config.blockSize ? config.blockSize : defaultBlockSize;

        var file = config.file;
        var fileSize = file.size;

        // A file smaller than one block is sent as a single, smaller block.
        var maxBlockSize = blockSize;
        if (fileSize < blockSize) {
            maxBlockSize = fileSize;
        }

        // Math.ceil replaces the modulo/parseInt dance: exact multiples
        // divide evenly, everything else rounds up to one extra block.
        var numberOfBlocks = Math.ceil(fileSize / maxBlockSize);

        return {
            maxBlockSize: maxBlockSize,
            numberOfBlocks: numberOfBlocks,
            totalBytesRemaining: fileSize,
            currentFilePointer: 0,
            blockIds: [],            // base64 block ids, in upload order
            blockIdPrefix: 'block-',
            bytesUploaded: 0,
            submitUri: null,
            file: file,
            baseUrl: config.baseUrl,
            sasToken: config.sasToken,
            fileUrl: config.baseUrl + config.sasToken,
            progress: config.progress,
            complete: config.complete,
            error: config.error,
            cancelled: false
        };
    };

    /* config: {
      baseUrl: // baseUrl for blob file uri (i.e. http://<accountName>.blob.core.windows.net/<container>/<blobname>),
      sasToken: // Shared access signature querystring key/value prefixed with ?,
      file: // File object using the HTML5 File API,
      progress: // progress callback function,
      complete: // complete callback function,
      error: // error callback function,
      blockSize: // Use this to override the defaultBlockSize
    } */
    /**
     * Starts a block upload of config.file to the blob URL built from
     * config.baseUrl + config.sasToken (see the config comment above).
     *
     * NOTE(review): each call creates a new FileReader and overwrites the
     * service-level `state`, so two concurrent uploads through the same
     * service instance would interfere — confirm callers run one at a time.
     *
     * @param {Object} config - upload configuration.
     * @returns {Object} handle with a cancel() method that flags the
     *     shared state so no further blocks are scheduled.
     */
    var upload = function(config) {
        state = initializeState(config);

        var reader = new FileReader();
        // Fires after each readAsArrayBuffer() issued by uploadFileInBlocks;
        // PUTs the block just read, then schedules the next one.
        reader.onloadend = function(evt) {
            if (evt.target.readyState === FileReader.DONE && !state.cancelled) { // DONE === 2
                // The block being sent is the last id pushed by
                // uploadFileInBlocks before it started this read.
                var uri = state.fileUrl + '&comp=block&blockid=' + state.blockIds[state.blockIds.length - 1];
                var requestData = new Uint8Array(evt.target.result);

                $http.put(uri,
                        requestData,
                        {
                            headers: {
                                'x-ms-blob-type': 'BlockBlob',
                                'Content-Type': state.file.type
                            },
                            // Empty transformRequest keeps Angular from
                            // JSON-serializing the binary payload.
                            transformRequest: []
                        })
                    .success(function(data, status, headers, config) {
                        state.bytesUploaded += requestData.length;

                        var percentComplete = ((parseFloat(state.bytesUploaded) / parseFloat(state.file.size)) * 100
                        ).toFixed(2);
                        if (state.progress) state.progress(percentComplete, data, status, headers, config);

                        // Chain the next block only after this one succeeds.
                        uploadFileInBlocks(reader, state);
                    })
                    .error(function(data, status, headers, config) {
                        if (state.error) state.error(data, status, headers, config);
                    });
            }
        };

        uploadFileInBlocks(reader, state);

        return {
            cancel: function() {
                state.cancelled = true;
            }
        };
    };

    /**
     * Cancels the in-flight upload and discards all stopwatch timers.
     * @returns {boolean} always true.
     */
    function cancel() {
        state.cancelled = true;
        stopWatch = {};
        return true;
    }

    /**
     * Starts a named timer. Calling again with the same handle while it
     * is running is a no-op (the original start time is kept).
     * @param {string} handle - identifier for the timer.
     */
    function startStopWatch(handle) {
        if (stopWatch[handle] !== undefined) {
            return; // already running; keep the first start time
        }
        stopWatch[handle] = { start: Date.now() };
    }

    /**
     * Stops a timer started by startStopWatch and returns its elapsed
     * time in milliseconds. The timer entry is removed.
     * @param {string} handle - identifier for the timer.
     * @returns {number|undefined} elapsed ms, or undefined when the
     *     handle was never started (previously this threw a TypeError).
     */
    function stopStopWatch(handle) {
        var entry = stopWatch[handle];
        if (entry === undefined) return undefined; // guard: unknown handle
        entry.stop = Date.now();
        var duration = entry.stop - entry.start;
        delete stopWatch[handle];
        return duration;
    }

    /**
     * Commits the uploaded blocks as a single blob by PUTting the
     * <BlockList> XML to the blob's &comp=blocklist endpoint.
     * @param {Object} state - upload state holding fileUrl, blockIds,
     *     file and the complete/error callbacks.
     */
    var commitBlockList = function(state) {
        var uri = state.fileUrl + '&comp=blocklist';

        // Assemble the block-list document from the recorded block ids.
        var latestTags = [];
        for (var i = 0; i < state.blockIds.length; i++) {
            latestTags.push('<Latest>' + state.blockIds[i] + '</Latest>');
        }
        var requestBody = '<?xml version="1.0" encoding="utf-8"?><BlockList>' +
            latestTags.join('') + '</BlockList>';

        var options = {
            headers: {
                'x-ms-blob-content-type': state.file.type
            }
        };

        $http.put(uri, requestBody, options)
            .success(function(data, status, headers, config) {
                if (state.complete) state.complete(data, status, headers, config);
            })
            .error(function(data, status, headers, config) {
                // Invoked asynchronously on transport failure or when the
                // server returns an error status.
                if (state.error) state.error(data, status, headers, config);
            });
    };

    /**
     * Slices the next block of the file and queues it for upload, or
     * commits the block list once every byte has been dispatched.
     * No-op when the upload has been cancelled.
     * @param {FileReader} reader - reader whose onloadend PUTs the slice.
     * @param {Object} state - mutable upload state.
     */
    var uploadFileInBlocks = function(reader, state) {
        if (state.cancelled) {
            return; // user aborted; stop scheduling work
        }

        if (state.totalBytesRemaining <= 0) {
            // Every block has been dispatched; finalize the blob.
            commitBlockList(state);
            return;
        }

        // Record the (base64) block id before the async read completes,
        // so ids stay in file order.
        var start = state.currentFilePointer;
        var fileContent = state.file.slice(start, start + state.maxBlockSize);
        var blockId = state.blockIdPrefix + stringUtilities.pad(state.blockIds.length, 6);

        state.blockIds.push(btoa(blockId));
        reader.readAsArrayBuffer(fileContent);

        state.currentFilePointer += state.maxBlockSize;
        state.totalBytesRemaining -= state.maxBlockSize;
        // Shrink the final block to the bytes actually left.
        if (state.totalBytesRemaining < state.maxBlockSize) {
            state.maxBlockSize = state.totalBytesRemaining;
        }
    };

    return {
        upload: upload,
        cancel: cancel,
        startStopWatch: startStopWatch,
        stopStopWatch: stopStopWatch
    };
};
})();

Are there any ways I can move the scope of objects to help with Chrome GC? I have seen other people mentioning similar issues but understood Chromium had resolved some.

I should say my solution is heavily based on Gaurav Mantri's blog post here:

http://gauravmantri.com/2013/02/16/uploading-large-files-in-windows-azure-blob-storage-using-shared-access-signature-html-and-javascript/#comment-47480

解决方案

I can't see any obvious memory leaks or things I can change to help garbage collection. I store the block IDs in an array so obviously there will be some memory creeep but this shouldn't be massive. It's almost as if the File API is holding the whole file it slices into memory.

You are correct. The new Blobs created by .slice() are being held in memory.

The solution is to call Blob.prototype.close() on the Blob reference when processing Blob or File object is complete.

Note also, at javascript at Question also creates a new instance of FileReader if upload function is called more than once.

4.3.1. The slice method

The slice() method returns a new Blob object with bytes ranging from the optional start parameter up to but not including the optional end parameter, and with a type attribute that is the value of the optional contentType parameter.

Blob instances exist for the life of document. Though Blob should be garbage collected once removed from Blob URL Store

9.6. Lifetime of Blob URLs

Note: User agents are free to garbage collect resources removed from the Blob URL Store.

Each Blob must have an internal snapshot state, which must be initially set to the state of the underlying storage, if any such underlying storage exists, and must be preserved through StructuredClone. Further normative definition of snapshot state can be found for Files.

4.3.2. The close method

The close() method is said to close a Blob, and must act as follows:

  1. If the readability state of the context object is CLOSED, terminate this algorithm.
  2. Otherwise, set the readability state of the context object to CLOSED.
  3. If the context object has an entry in the Blob URL Store, remove the entry that corresponds to the context object.

If Blob object is passed to URL.createObjectURL(), call URL.revokeObjectURL() on Blob or File object, then call .close().

The revokeObjectURL(url) static method

Revokes the Blob URL provided in the string url by removing the corresponding entry from the Blob URL Store. This method must act as follows: 1. If the url refers to a Blob that has a readability state of CLOSED OR if the value provided for the url argument is not a Blob URL, OR if the value provided for the url argument does not have an entry in the Blob URL Store, this method call does nothing. User agents may display a message on the error console. 2. Otherwise, user agents must remove the entry from the Blob URL Store for url.

You can view the result of these calls by opening

chrome://blob-internals 

reviewing details of before and after calls which create Blob and close Blob.

For example, from

xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
Refcount: 1
Content Type: text/plain
Type: data
Length: 3

to

xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
Refcount: 1
Content Type: text/plain

following call to .close(). Similarly from

blob:http://example.com/c2823f75-de26-46f9-a4e5-95f57b8230bd
Uuid: 29e430a6-f093-40c2-bc70-2b6838a713bc


An alternative approach could be to send file as an ArrayBuffer or chunks of array buffers. Then re-assemble the file at server.

Or you can call FileReader constructor, FileReader.prototype.readAsArrayBuffer(), and load event of FileReader each once.

At load event of FileReader pass ArrayBuffer to Uint8Array, use ReadableStream, TypedArray.prototype.subarray(), .getReader(), .read() to get N chunks of ArrayBuffer as a TypedArray at pull from Uint8Array. When N chunks equaling .byteLength of ArrayBuffer have been processed, pass array of Uint8Arrays to Blob constructor to recombine file parts into single file at browser; then send Blob to server.

<!DOCTYPE html>
<html>

<head>
</head>

<body>
  <!-- File picker, upload progress bar, and an image preview target. -->
  <input id="file" type="file">
  <br>
  <progress value="0"></progress>
  <br>
  <output for="file"><img alt="preview"></output>
  <script type="text/javascript">
    // DOM handles, a single shared FileReader, and the 1MB chunk size.
    const [input, output, img, progress, fr, handleError, CHUNK] = [
      document.querySelector("input[type='file']")
      , document.querySelector("output[for='file']")
      , document.querySelector("output img")
      , document.querySelector("progress")
      , new FileReader
      , (err) => console.log(err)
      , 1024 * 1024
    ];

    // Custom "progress" event: update the bar, then resolve the promise
    // so the stream consumer continues with the next chunk.
    progress.addEventListener("progress", e => {
      progress.value = e.detail.value;
      e.detail.promise();
    });

    let [chunks, NEXT, CURR, url, blob] = [Array(), 0, 0];

    input.onchange = () => {
      // Reset counters and release resources from any previous file.
      NEXT = CURR = progress.value = progress.max = chunks.length = 0;
      if (url) {
        URL.revokeObjectURL(url);
        // Blob.prototype.close() is not implemented everywhere; guard it.
        if (blob.hasOwnProperty("close")) {
          blob.close();
        }
      }

      if (input.files.length) {
        console.log(input.files[0]);
        progress.max = input.files[0].size;
        progress.step = progress.max / CHUNK;
        fr.readAsArrayBuffer(input.files[0]);
      }

    }

    fr.onload = () => {
      const VIEW = new Uint8Array(fr.result);
      const LEN = VIEW.byteLength;
      const {type, name:filename} = input.files[0];
      // Stream CHUNK-sized views of the buffer; subarray() copies nothing.
      const stream = new ReadableStream({
          pull(controller) {
            if (NEXT < LEN) {
              controller
              .enqueue(VIEW.subarray(NEXT, !NEXT ? CHUNK : CHUNK + NEXT));
               NEXT += CHUNK;
            } else {
              controller.close();
            }
          },
          cancel(reason) {
            console.log(reason);
            throw new Error(reason);
          }
      });

      const [reader, processData] = [
        stream.getReader()
        , ({value, done}) => {
            if (done) {
              return reader.closed.then(() => chunks);
            }
            chunks.push(value);
            // Pause until the progress handler resolves, then read on.
            return new Promise(resolve => {
              progress.dispatchEvent(
                new CustomEvent("progress", {
                  detail:{
                    value:CURR += value.byteLength,
                    promise:resolve
                  }
                })
              );
            })
            .then(() => reader.read().then(data => processData(data)))
            .catch(e => reader.cancel(e))
        }
      ];

      reader.read()
      .then(data => processData(data))
      .then(data => {
        // Recombine the chunks into a single Blob of the original type.
        blob = new Blob(data, {type});
        console.log("complete", data, blob);
        if (/image/.test(type)) {
          url = URL.createObjectURL(blob);
          img.onload = () => {
            img.title = filename;
            input.value = "";
          }
          img.src = url;
        } else {
          input.value = "";
        }
      })
      .catch(e => handleError(e))

    }
  </script>

</body>

</html>

plnkr http://plnkr.co/edit/AEZ7iQce4QaJOKut71jk?p=preview


You can also use utilize fetch()

fetch(new Request("/path/to/server/", {method:"PUT", body:blob}))

To transmit body for a request request, run these steps:

  1. Let body be request’s body.
  2. If body is null, then queue a fetch task on request to process request end-of-body for request and abort these steps.

  3. Let read be the result of reading a chunk from body’s stream.

    • When read is fulfilled with an object whose done property is false and whose value property is a Uint8Array object, run these substeps:

      1. Let bytes be the byte sequence represented by the Uint8Array object.
      2. Transmit bytes.

      3. Increase body’s transmitted bytes by bytes’s length.

      4. Run the above step again.

    • When read is fulfilled with an object whose done property is true, queue a fetch task on request to process request end-of-body for request.

    • When read is fulfilled with a value that matches with neither of the above patterns, or read is rejected, terminate the ongoing fetch with reason fatal.

See also

这篇关于Chrome内存问题 - File API + AngularJS的文章就介绍到这了,希望我们推荐的答案对大家有所帮助,也希望大家多多支持IT屋!

查看全文
登录 关闭
扫码关注1秒登录
发送“验证码”获取 | 15天全站免登陆