author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-28 14:29:10 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-28 14:29:10 +0000
commit     2aa4a82499d4becd2284cdb482213d541b8804dd (patch)
tree       b80bf8bf13c3766139fbacc530efd0dd9d54394c /netwerk/test/unit/test_alt-data_too_big.js
parent     Initial commit. (diff)
Adding upstream version 86.0.1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'netwerk/test/unit/test_alt-data_too_big.js')
-rw-r--r--   netwerk/test/unit/test_alt-data_too_big.js   113
1 file changed, 113 insertions, 0 deletions
diff --git a/netwerk/test/unit/test_alt-data_too_big.js b/netwerk/test/unit/test_alt-data_too_big.js
new file mode 100644
index 0000000000..d928394272
--- /dev/null
+++ b/netwerk/test/unit/test_alt-data_too_big.js
@@ -0,0 +1,113 @@
+/**
+ * Test for handling too big alternative data
+ *
+ * - first we try to open an output stream for too big alt-data which must fail
+ *   and leave original data intact
+ *
+ * - then we open the output stream without passing predicted data size which
+ *   succeeds but writing must fail later at the size limit and the original
+ *   data must be kept
+ */
+
+"use strict";
+
+var data = "data    ";
+var altData = "alt-data";
+
+function run_test() {
+  do_get_profile();
+
+  // Expand both data to 1MB
+  for (let i = 0; i < 17; i++) {
+    data += data;
+    altData += altData;
+  }
+
+  // Set the limit so that the data fits but alt-data doesn't.
+  Services.prefs.setIntPref("browser.cache.disk.max_entry_size", 1800);
+
+  write_data();
+
+  do_test_pending();
+}
+
+function write_data() {
+  asyncOpenCacheEntry(
+    "http://data/",
+    "disk",
+    Ci.nsICacheStorage.OPEN_NORMALLY,
+    null,
+    function(status, entry) {
+      Assert.equal(status, Cr.NS_OK);
+
+      var os = entry.openOutputStream(0, -1);
+      var written = os.write(data, data.length);
+      Assert.equal(written, data.length);
+      os.close();
+
+      open_big_altdata_output(entry);
+    }
+  );
+}
+
+function open_big_altdata_output(entry) {
+  try {
+    entry.openAlternativeOutputStream("text/binary", altData.length);
+  } catch (e) {
+    Assert.equal(e.result, Cr.NS_ERROR_FILE_TOO_BIG);
+  }
+  entry.close();
+
+  check_entry(write_big_altdata);
+}
+
+function write_big_altdata() {
+  asyncOpenCacheEntry(
+    "http://data/",
+    "disk",
+    Ci.nsICacheStorage.OPEN_NORMALLY,
+    null,
+    function(status, entry) {
+      Assert.equal(status, Cr.NS_OK);
+
+      var os = entry.openAlternativeOutputStream("text/binary", -1);
+      try {
+        os.write(altData, altData.length);
+      } catch (e) {
+        Assert.equal(e.result, Cr.NS_ERROR_FILE_TOO_BIG);
+      }
+      os.close();
+      entry.close();
+
+      check_entry(do_test_finished);
+    }
+  );
+}
+
+function check_entry(cb) {
+  asyncOpenCacheEntry(
+    "http://data/",
+    "disk",
+    Ci.nsICacheStorage.OPEN_NORMALLY,
+    null,
+    function(status, entry) {
+      Assert.equal(status, Cr.NS_OK);
+
+      var is = null;
+      try {
+        is = entry.openAlternativeInputStream("text/binary");
+      } catch (e) {
+        Assert.equal(e.result, Cr.NS_ERROR_NOT_AVAILABLE);
+      }
+
+      is = entry.openInputStream(0);
+      pumpReadStream(is, function(read) {
+        Assert.equal(read.length, data.length);
+        is.close();
+        entry.close();
+
+        executeSoon(cb);
+      });
+    }
+  );
+}
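
Note: the following sketch is not part of the test file above; it only illustrates the size arithmetic the test relies on, assuming the browser.cache.disk.max_entry_size pref value is interpreted in kilobytes.

// Illustrative only: both seed strings are 8 characters and are doubled 17 times.
const seedBytes = 8;
const payloadBytes = seedBytes * 2 ** 17;           // 1 MiB (1048576 bytes)
const limitKB = 1800;                               // browser.cache.disk.max_entry_size
console.log(payloadBytes / 1024 <= limitKB);        // true:  the original data alone fits
console.log((2 * payloadBytes) / 1024 <= limitKB);  // false: data plus alt-data exceeds the limit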