From a9c818418b81b93680170e1a84d4e221e578ad2f Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Wed, 17 Apr 2024 09:56:53 +0200 Subject: Adding debian version 6.4.3+dfsg1-1. Signed-off-by: Daniel Baumann --- .../plupload/csharp/Plupload/App.xaml | 8 + .../plupload/csharp/Plupload/App.xaml.cs | 45 ++ .../plupload/csharp/Plupload/FJCore/DCT.cs | 222 ++++++ .../plupload/csharp/Plupload/FJCore/DecodedJpeg.cs | 121 +++ .../csharp/Plupload/FJCore/Decoder/HuffmanTable.cs | 487 ++++++++++++ .../Plupload/FJCore/Decoder/JpegComponent.cs | 702 ++++++++++++++++ .../csharp/Plupload/FJCore/Decoder/JpegDecoder.cs | 614 ++++++++++++++ .../csharp/Plupload/FJCore/Decoder/JpegFrame.cs | 283 +++++++ .../Plupload/FJCore/Decoder/JpegHuffmanTable.cs | 183 +++++ .../FJCore/Decoder/JpegQuantizationTable.cs | 116 +++ .../csharp/Plupload/FJCore/Decoder/JpegScan.cs | 37 + .../csharp/Plupload/FJCore/Encoder/JpegEncoder.cs | 327 ++++++++ .../plupload/csharp/Plupload/FJCore/FDCT.cs | 201 +++++ .../csharp/Plupload/FJCore/Filter/Convolution.cs | 404 ++++++++++ .../csharp/Plupload/FJCore/Filter/FilterBase.cs | 47 ++ .../Plupload/FJCore/Filter/FilterLowpassResize.cs | 44 + .../Plupload/FJCore/Filter/FilterNNResize.cs | 47 ++ .../csharp/Plupload/FJCore/Filter/GrayImage.cs | 77 ++ .../plupload/csharp/Plupload/FJCore/IJG.txt | 90 +++ .../csharp/Plupload/FJCore/IO/BinaryReader.cs | 46 ++ .../csharp/Plupload/FJCore/IO/BinaryWriter.cs | 45 ++ .../csharp/Plupload/FJCore/IO/JpegBinaryReader.cs | 117 +++ .../plupload/csharp/Plupload/FJCore/Image.cs | 183 +++++ .../plupload/csharp/Plupload/FJCore/JAI.txt | 40 + .../plupload/csharp/Plupload/FJCore/JpegMarker.cs | 128 +++ .../plupload/csharp/Plupload/FJCore/License.txt | 24 + .../plupload/csharp/Plupload/FJCore/README.txt | 30 + .../csharp/Plupload/FJCore/Resize/ImageResizer.cs | 98 +++ .../plupload/csharp/Plupload/FJCore/YCbCr.cs | 59 ++ .../plupload/csharp/Plupload/FJCore/ZigZag.cs | 65 ++ .../plupload/csharp/Plupload/FileReference.cs | 721 +++++++++++++++++ .../plupload/csharp/Plupload/Page.xaml | 7 + .../plupload/csharp/Plupload/Page.xaml.cs | 230 ++++++ .../plupload/csharp/Plupload/Plupload.csproj | 222 ++++++ .../plupload/csharp/Plupload/Plupload.sln | 20 + .../plupload/csharp/Plupload/PngEncoder/Adler32.cs | 216 +++++ .../plupload/csharp/Plupload/PngEncoder/CRC32.cs | 213 +++++ .../csharp/Plupload/PngEncoder/Deflater.cs | 543 +++++++++++++ .../Plupload/PngEncoder/DeflaterConstants.cs | 184 +++++ .../csharp/Plupload/PngEncoder/DeflaterEngine.cs | 832 +++++++++++++++++++ .../csharp/Plupload/PngEncoder/DeflaterHuffman.cs | 881 +++++++++++++++++++++ .../Plupload/PngEncoder/DeflaterOutputStream.cs | 469 +++++++++++ .../csharp/Plupload/PngEncoder/DeflaterPending.cs | 55 ++ .../csharp/Plupload/PngEncoder/IChecksum.cs | 90 +++ .../csharp/Plupload/PngEncoder/PendingBuffer.cs | 281 +++++++ .../csharp/Plupload/PngEncoder/PngEncoder.cs | 467 +++++++++++ .../csharp/Plupload/Properties/AppManifest.xml | 6 + .../csharp/Plupload/Properties/AssemblyInfo.cs | 45 ++ .../plupload/csharp/Plupload/Utils/JsonReader.cs | 486 ++++++++++++ 49 files changed, 10858 insertions(+) create mode 100644 debian/missing-sources/plupload/csharp/Plupload/App.xaml create mode 100644 debian/missing-sources/plupload/csharp/Plupload/App.xaml.cs create mode 100644 debian/missing-sources/plupload/csharp/Plupload/FJCore/DCT.cs create mode 100644 debian/missing-sources/plupload/csharp/Plupload/FJCore/DecodedJpeg.cs create mode 100644 debian/missing-sources/plupload/csharp/Plupload/FJCore/Decoder/HuffmanTable.cs create 
mode 100644 debian/missing-sources/plupload/csharp/Plupload/FJCore/Decoder/JpegComponent.cs create mode 100644 debian/missing-sources/plupload/csharp/Plupload/FJCore/Decoder/JpegDecoder.cs create mode 100644 debian/missing-sources/plupload/csharp/Plupload/FJCore/Decoder/JpegFrame.cs create mode 100644 debian/missing-sources/plupload/csharp/Plupload/FJCore/Decoder/JpegHuffmanTable.cs create mode 100644 debian/missing-sources/plupload/csharp/Plupload/FJCore/Decoder/JpegQuantizationTable.cs create mode 100644 debian/missing-sources/plupload/csharp/Plupload/FJCore/Decoder/JpegScan.cs create mode 100644 debian/missing-sources/plupload/csharp/Plupload/FJCore/Encoder/JpegEncoder.cs create mode 100644 debian/missing-sources/plupload/csharp/Plupload/FJCore/FDCT.cs create mode 100644 debian/missing-sources/plupload/csharp/Plupload/FJCore/Filter/Convolution.cs create mode 100644 debian/missing-sources/plupload/csharp/Plupload/FJCore/Filter/FilterBase.cs create mode 100644 debian/missing-sources/plupload/csharp/Plupload/FJCore/Filter/FilterLowpassResize.cs create mode 100644 debian/missing-sources/plupload/csharp/Plupload/FJCore/Filter/FilterNNResize.cs create mode 100644 debian/missing-sources/plupload/csharp/Plupload/FJCore/Filter/GrayImage.cs create mode 100644 debian/missing-sources/plupload/csharp/Plupload/FJCore/IJG.txt create mode 100644 debian/missing-sources/plupload/csharp/Plupload/FJCore/IO/BinaryReader.cs create mode 100644 debian/missing-sources/plupload/csharp/Plupload/FJCore/IO/BinaryWriter.cs create mode 100644 debian/missing-sources/plupload/csharp/Plupload/FJCore/IO/JpegBinaryReader.cs create mode 100644 debian/missing-sources/plupload/csharp/Plupload/FJCore/Image.cs create mode 100644 debian/missing-sources/plupload/csharp/Plupload/FJCore/JAI.txt create mode 100644 debian/missing-sources/plupload/csharp/Plupload/FJCore/JpegMarker.cs create mode 100644 debian/missing-sources/plupload/csharp/Plupload/FJCore/License.txt create mode 100644 debian/missing-sources/plupload/csharp/Plupload/FJCore/README.txt create mode 100644 debian/missing-sources/plupload/csharp/Plupload/FJCore/Resize/ImageResizer.cs create mode 100644 debian/missing-sources/plupload/csharp/Plupload/FJCore/YCbCr.cs create mode 100644 debian/missing-sources/plupload/csharp/Plupload/FJCore/ZigZag.cs create mode 100644 debian/missing-sources/plupload/csharp/Plupload/FileReference.cs create mode 100644 debian/missing-sources/plupload/csharp/Plupload/Page.xaml create mode 100644 debian/missing-sources/plupload/csharp/Plupload/Page.xaml.cs create mode 100644 debian/missing-sources/plupload/csharp/Plupload/Plupload.csproj create mode 100644 debian/missing-sources/plupload/csharp/Plupload/Plupload.sln create mode 100644 debian/missing-sources/plupload/csharp/Plupload/PngEncoder/Adler32.cs create mode 100644 debian/missing-sources/plupload/csharp/Plupload/PngEncoder/CRC32.cs create mode 100644 debian/missing-sources/plupload/csharp/Plupload/PngEncoder/Deflater.cs create mode 100644 debian/missing-sources/plupload/csharp/Plupload/PngEncoder/DeflaterConstants.cs create mode 100644 debian/missing-sources/plupload/csharp/Plupload/PngEncoder/DeflaterEngine.cs create mode 100644 debian/missing-sources/plupload/csharp/Plupload/PngEncoder/DeflaterHuffman.cs create mode 100644 debian/missing-sources/plupload/csharp/Plupload/PngEncoder/DeflaterOutputStream.cs create mode 100644 debian/missing-sources/plupload/csharp/Plupload/PngEncoder/DeflaterPending.cs create mode 100644 
debian/missing-sources/plupload/csharp/Plupload/PngEncoder/IChecksum.cs create mode 100644 debian/missing-sources/plupload/csharp/Plupload/PngEncoder/PendingBuffer.cs create mode 100644 debian/missing-sources/plupload/csharp/Plupload/PngEncoder/PngEncoder.cs create mode 100644 debian/missing-sources/plupload/csharp/Plupload/Properties/AppManifest.xml create mode 100644 debian/missing-sources/plupload/csharp/Plupload/Properties/AssemblyInfo.cs create mode 100644 debian/missing-sources/plupload/csharp/Plupload/Utils/JsonReader.cs (limited to 'debian/missing-sources/plupload/csharp') diff --git a/debian/missing-sources/plupload/csharp/Plupload/App.xaml b/debian/missing-sources/plupload/csharp/Plupload/App.xaml new file mode 100644 index 0000000..32f7a7e --- /dev/null +++ b/debian/missing-sources/plupload/csharp/Plupload/App.xaml @@ -0,0 +1,8 @@ + + + + + diff --git a/debian/missing-sources/plupload/csharp/Plupload/App.xaml.cs b/debian/missing-sources/plupload/csharp/Plupload/App.xaml.cs new file mode 100644 index 0000000..9683c6d --- /dev/null +++ b/debian/missing-sources/plupload/csharp/Plupload/App.xaml.cs @@ -0,0 +1,45 @@ +/** + * App.xaml.cs + * + * Copyright 2009, Moxiecode Systems AB + * Released under GPL License. + * + * License: http://www.plupload.com/license + * Contributing: http://www.plupload.com/contributing + */ + +using System.Windows; +using System; +using System.Windows.Browser; + +namespace Moxiecode.Plupload { + /// + /// Partial class for the Silverlight application. + /// + public partial class App : Application { + public App() { + this.Startup += this.OnStartup; + this.UnhandledException += this.Application_UnhandledException; + + InitializeComponent(); + } + + private void OnStartup(object sender, StartupEventArgs e) { + this.RootVisual = new Page(e.InitParams); + } + + private void Application_UnhandledException(object sender, ApplicationUnhandledExceptionEventArgs e) { + if (!System.Diagnostics.Debugger.IsAttached) { + e.Handled = true; + + try { + string errorMsg = e.ExceptionObject.Message + @"\n" + e.ExceptionObject.StackTrace; + errorMsg = errorMsg.Replace("\"", "\\\"").Replace("\r\n", @"\n"); + + System.Windows.Browser.HtmlPage.Window.Eval("throw new Error(\"Unhandled Error in Silverlight 2 Application: " + errorMsg + "\");"); + } catch (Exception) { + } + } + } + } +} \ No newline at end of file diff --git a/debian/missing-sources/plupload/csharp/Plupload/FJCore/DCT.cs b/debian/missing-sources/plupload/csharp/Plupload/FJCore/DCT.cs new file mode 100644 index 0000000..e621799 --- /dev/null +++ b/debian/missing-sources/plupload/csharp/Plupload/FJCore/DCT.cs @@ -0,0 +1,222 @@ +/// Copyright (c) 2008 Jeffrey Powers for Fluxcapacity Open Source. +/// Under the MIT License, details: License.txt.. + +// NOTE: Compile with DYNAMIC_IDCT for a decode performance boost. +// May not yield a perceptible boost for small images, +// since there is some overhead in emitting CIL dynamically. + +using System; +using System.Reflection.Emit; +using System.Reflection; + +namespace FluxJpeg.Core +{ + /// + /// Implements the Discrete Cosine Transform with dynamic CIL + /// + public partial class DCT + { + private float[] _temp = new float[64]; + + // Cosine matrix and transposed cosine matrix + private static readonly float[,] c = buildC(); + private static readonly float[,] cT = buildCT(); + + internal DCT() + { + #if DYNAMIC_IDCT + dynamicIDCT = dynamicIDCT ?? 
EmitIDCT(); + #endif + } + + /// + /// Precomputes cosine terms in A.3.3 of + /// http://www.w3.org/Graphics/JPEG/itu-t81.pdf + /// + /// Closely follows the term precomputation in the + /// Java Advanced Imaging library. + /// + private static float[,] buildC() + { + float[,] c = new float[8, 8]; + + for (int i = 0; i < 8; i++) // i == u or v + { + for (int j = 0; j < 8; j++) // j == x or y + { + c[i, j] = i == 0 ? + 0.353553391f : /* 1 / SQRT(8) */ + (float)(0.5 * Math.Cos(((2.0 * j + 1) * i * Math.PI) / 16.0)); + } + } + + return c; + } + private static float[,] buildCT() + { + // Transpose i,k <-- j,i + float[,] cT = new float[8, 8]; + for (int i = 0; i < 8; i++) + for (int j = 0; j < 8; j++) + cT[j, i] = c[i, j]; + return cT; + } + + public static void SetValueClipped(byte[,] arr, int i, int j, float val) + { + // Clip into the 0...255 range & round + arr[i, j] = val < 0 ? (byte)0 + : val > 255 ? (byte)255 + : (byte)(val + 0.5); + } + + /// See figure A.3.3 IDCT (informative) on A-5. + /// http://www.w3.org/Graphics/JPEG/itu-t81.pdf + internal byte[,] FastIDCT(float[] input) + { + byte[,] output = new byte[8, 8]; + + #if DYNAMIC_IDCT + + // Fastest, dynamic MSIL stream + dynamicIDCT(input, _temp, output); + + #else + + #region Slower, easy-to-read, pure C# IDCT + + float temp, val = 0; + int idx = 0; + for (int i = 0; i < 8; i++) + { + for (int j = 0; j < 8; j++) + { + val = 0; + + for(int k = 0; k < 8; k++) + { + val += input[i * 8 + k] * c[k, j]; + } + + _temp[idx++] = val; + } + } + for (int i = 0; i < 8; i++) + { + for (int j = 0; j < 8; j++) + { + temp = 128f; + + for (int k = 0; k < 8; k++) + { + temp += cT[i, k] * _temp[k * 8 + j]; + } + + if (temp < 0) output[i, j] = 0; + else if (temp > 255) output[i, j] = 255; + else output[i, j] = (byte)(temp + 0.5); // Implements rounding + } + } + + + #endregion + + #endif + + return output; + } + + + + #if DYNAMIC_IDCT + + /// + /// Generates a pure-IL nonbranching stream of instructions + /// that perform the inverse DCT. Relies on helper function + /// SetValueClipped. 
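// Illustrative note (not upstream FJCore code): the cosine basis built by buildC()
// evaluates c[i, j] = 0.5 * cos((2j + 1) * i * pi / 16) for i > 0 and the constant
// 1/sqrt(8) ~= 0.3536 for i == 0. Row i = 1, for example, works out to roughly
//   0.4904, 0.4157, 0.2778, 0.0975, -0.0975, -0.2778, -0.4157, -0.4904
// FastIDCT above applies this basis twice -- once along rows (temp = block * C) and
// once along columns (output = 128 + C^T * temp) -- then rounds and clips to 0..255,
// matching the informative IDCT of ITU-T T.81, A.3.3. A hypothetical helper that
// reproduces one basis row with the same formula:
//
//   static float[] CosineRow(int i)
//   {
//       var row = new float[8];
//       for (int j = 0; j < 8; j++)
//           row[j] = i == 0
//               ? 0.353553391f
//               : (float)(0.5 * Math.Cos(((2.0 * j + 1) * i * Math.PI) / 16.0));
//       return row;
//   }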
+ /// + /// A delegate to the DynamicMethod + private static IDCTFunc EmitIDCT() + { + Type[] args = { typeof(float[]), typeof(float[]), typeof(byte[,]) }; + + DynamicMethod idctMethod = new DynamicMethod("dynamicIDCT", + null, // no return type + args); // input arrays + + ILGenerator il = idctMethod.GetILGenerator(); + + int idx = 0; + + for (int i = 0; i < 8; i++) + { + for (int j = 0; j < 8; j++) + { + il.Emit(OpCodes.Ldarg_1); // 1 {temp} + il.Emit(OpCodes.Ldc_I4_S, (short)idx++); // 3 {temp, idx} + + for (int k = 0; k < 8; k++) + { + il.Emit(OpCodes.Ldarg_0); // {in} + il.Emit(OpCodes.Ldc_I4_S, (short)(i * 8 + k)); // {in,idx} + il.Emit(OpCodes.Ldelem_R4); // {in[idx]} + il.Emit(OpCodes.Ldc_R4, c[k, j]); // {in[idx],c[k,j]} + il.Emit(OpCodes.Mul); // {in[idx]*c[k,j]} + if (k != 0) il.Emit(OpCodes.Add); + } + + il.Emit(OpCodes.Stelem_R4); // {} + } + } + + var meth = typeof(DCT).GetMethod("SetValueClipped", + BindingFlags.Static | BindingFlags.Public, null, + CallingConventions.Standard, + new Type[] { + typeof(byte[,]), // arr + typeof(int), // i + typeof(int), // j + typeof(float) } // val + , null); + + for (int i = 0; i < 8; i++) + { + for (int j = 0; j < 8; j++) + { + il.Emit(OpCodes.Ldarg_2); // {output} + il.Emit(OpCodes.Ldc_I4_S, (short)i); // {output,i} + il.Emit(OpCodes.Ldc_I4_S, (short)j); // X={output,i,j} + + il.Emit(OpCodes.Ldc_R4, 128.0f); // {X,128.0f} + + for (int k = 0; k < 8; k++) + { + il.Emit(OpCodes.Ldarg_1); // {X,temp} + il.Emit(OpCodes.Ldc_I4_S, + (short)(k * 8 + j)); // {X,temp,idx} + il.Emit(OpCodes.Ldelem_R4); // {X,temp[idx]} + il.Emit(OpCodes.Ldc_R4, cT[i, k]); // {X,temp[idx],cT[i,k]} + il.Emit(OpCodes.Mul); // {X,in[idx]*c[k,j]} + il.Emit(OpCodes.Add); + } + + il.EmitCall(OpCodes.Call, meth, null); + } + } + + il.Emit(OpCodes.Ret); + + return (IDCTFunc)idctMethod.CreateDelegate(typeof(IDCTFunc)); + } + + private delegate void IDCTFunc(float[] input, float[] temp, byte[,] output); + private static IDCTFunc dynamicIDCT = null; +#endif + + + } + + + + +} diff --git a/debian/missing-sources/plupload/csharp/Plupload/FJCore/DecodedJpeg.cs b/debian/missing-sources/plupload/csharp/Plupload/FJCore/DecodedJpeg.cs new file mode 100644 index 0000000..96ca213 --- /dev/null +++ b/debian/missing-sources/plupload/csharp/Plupload/FJCore/DecodedJpeg.cs @@ -0,0 +1,121 @@ +/// Copyright (c) 2008 Jeffrey Powers for Fluxcapacity Open Source. +/// Under the MIT License, details: License.txt. 
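// Illustrative worked example (not upstream FJCore code) for the block geometry that
// Initialize() below computes: with a single-component 100 x 75 image and sampling
// factors of 1, each dimension is padded to the next multiple of 8, so
//   compWidth  = ceil(100 / 8) * 8 = 104,  BlockWidth  = 13,  lastColumnIsDummy = true
//   compHeight = ceil( 75 / 8) * 8 =  80,  BlockHeight = 10,  lastRowIsDummy    = true
// i.e. the codec works on a 13 x 10 grid of 8x8 blocks and marks the padded edge
// column/row as dummies.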
+ +using System; +using System.Collections.Generic; +using System.Text; + +namespace FluxJpeg.Core +{ + public class JpegHeader + { + public byte Marker; + public byte[] Data; + internal bool IsJFIF = false; + public new string ToString { get { return Encoding.UTF8.GetString(Data, 0, Data.Length); } } + } + + public class DecodedJpeg + { + private Image _image; public Image Image { get { return _image; } } + + internal int[] BlockWidth; + internal int[] BlockHeight; + + internal int Precision = 8; + internal int[] HsampFactor = { 1, 1, 1 }; + internal int[] VsampFactor = { 1, 1, 1 }; + internal bool[] lastColumnIsDummy = new bool[] { false, false, false }; + internal bool[] lastRowIsDummy = new bool[] { false, false, false }; + + internal int[] compWidth, compHeight; + internal int MaxHsampFactor; + internal int MaxVsampFactor; + + public bool HasJFIF { get; private set; } + + private List _metaHeaders; + + public IList MetaHeaders { get { return _metaHeaders.AsReadOnly(); } } + + public DecodedJpeg(Image image, IEnumerable metaHeaders) + { + _image = image; + + // Handles null as an empty list + _metaHeaders = (metaHeaders == null) ? + new List(0) : new List(metaHeaders); + + // Check if the JFIF header was present + foreach (JpegHeader h in _metaHeaders) + if (h.IsJFIF) { HasJFIF = true; break; } + + int components = _image.ComponentCount; + + compWidth = new int[components]; + compHeight = new int[components]; + BlockWidth = new int[components]; + BlockHeight = new int[components]; + + Initialize(); + } + + public DecodedJpeg(Image image) + : this(image, null) + { + _metaHeaders = new List(); + + string comment = "Jpeg Codec | fluxcapacity.net "; + + _metaHeaders.Add( + new JpegHeader() { + Marker = JPEGMarker.COM, + Data = System.Text.Encoding.UTF8.GetBytes(comment) + } + ); + } + + /// + /// This method creates and fills three arrays, Y, Cb, and Cr using the input image. + /// + private void Initialize() + { + int w = _image.Width, h = _image.Height; + + int y; + + MaxHsampFactor = 1; + MaxVsampFactor = 1; + + for (y = 0; y < _image.ComponentCount; y++) + { + MaxHsampFactor = Math.Max(MaxHsampFactor, HsampFactor[y]); + MaxVsampFactor = Math.Max(MaxVsampFactor, VsampFactor[y]); + } + for (y = 0; y < _image.ComponentCount; y++) + { + compWidth[y] = (((w % 8 != 0) ? ((int)Math.Ceiling((double)w / 8.0)) * 8 : w) / MaxHsampFactor) * HsampFactor[y]; + if (compWidth[y] != ((w / MaxHsampFactor) * HsampFactor[y])) + { + lastColumnIsDummy[y] = true; + } + + // results in a multiple of 8 for compWidthz + // this will make the rest of the program fail for the unlikely + // event that someone tries to compress an 16 x 16 pixel image + // which would of course be worse than pointless + + BlockWidth[y] = (int)Math.Ceiling((double)compWidth[y] / 8.0); + compHeight[y] = (((h % 8 != 0) ? ((int)Math.Ceiling((double)h / 8.0)) * 8 : h) / MaxVsampFactor) * VsampFactor[y]; + if (compHeight[y] != ((h / MaxVsampFactor) * VsampFactor[y])) + { + lastRowIsDummy[y] = true; + } + + BlockHeight[y] = (int)Math.Ceiling((double)compHeight[y] / 8.0); + } + } + + } + +} \ No newline at end of file diff --git a/debian/missing-sources/plupload/csharp/Plupload/FJCore/Decoder/HuffmanTable.cs b/debian/missing-sources/plupload/csharp/Plupload/FJCore/Decoder/HuffmanTable.cs new file mode 100644 index 0000000..3da2818 --- /dev/null +++ b/debian/missing-sources/plupload/csharp/Plupload/FJCore/Decoder/HuffmanTable.cs @@ -0,0 +1,487 @@ +/// Copyright (c) 2008 Jeffrey Powers for Fluxcapacity Open Source. 
+/// Under the MIT License, details: License.txt. + +// Partially derives from a Java encoder, JpegEncoder.java by James R Weeks. +// Implements Baseline JPEG Encoding http://www.opennet.ru/docs/formats/jpeg.txt + +using System; + +using FluxJpeg.Core.IO; +using System.IO; +using System.Collections.Generic; + +namespace FluxJpeg.Core +{ + internal class HuffmanTable + { + public static int HUFFMAN_MAX_TABLES = 4; + + private short[] huffcode = new short[256]; + private short[] huffsize = new short[256]; + private short[] valptr = new short[16]; + private short[] mincode = {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,-1,-1}; + private short[] maxcode = {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}; + + private short[] huffval; + private short[] bits; + + int bufferPutBits, bufferPutBuffer; + internal int ImageHeight; + internal int ImageWidth; + internal int[,] DC_matrix0; + internal int[,] AC_matrix0; + internal int[,] DC_matrix1; + internal int[,] AC_matrix1; + internal int[][,] DC_matrix; + internal int[][,] AC_matrix; + internal int NumOfDCTables; + internal int NumOfACTables; + + public List bitsList; + public List val; + + + public static byte JPEG_DC_TABLE = 0; + public static byte JPEG_AC_TABLE = 1; + + private short lastk = 0; + + internal HuffmanTable(JpegHuffmanTable table) + { + if (table != null) + { + huffval = table.Values; + bits = table.Lengths; + + GenerateSizeTable(); + GenerateCodeTable(); + GenerateDecoderTables(); + } + else + { + // Encode initialization + + bitsList = new List(); + bitsList.Add(JpegHuffmanTable.StdDCLuminance.Lengths); + bitsList.Add(JpegHuffmanTable.StdACLuminance.Lengths); + bitsList.Add(JpegHuffmanTable.StdDCChrominance.Lengths); + bitsList.Add(JpegHuffmanTable.StdACChrominance.Lengths); + + val = new List(); + val.Add(JpegHuffmanTable.StdDCLuminance.Values); + val.Add(JpegHuffmanTable.StdACLuminance.Values); + val.Add(JpegHuffmanTable.StdDCChrominance.Values); + val.Add(JpegHuffmanTable.StdACChrominance.Values); + + initHuf(); + } + } + + /// See Figure C.1 + private void GenerateSizeTable() + { + short index = 0; + for (short i = 0; i < bits.Length; i++) + { + for (short j = 0; j < bits[i]; j++) + { + huffsize[index] = (short)(i + 1); + index++; + } + } + lastk = index; + } + + /// See Figure C.2 + private void GenerateCodeTable() + { + short k = 0; + short si = huffsize[0]; + short code = 0; + for (short i = 0; i < huffsize.Length; i++) + { + while (huffsize[k] == si) + { + huffcode[k] = code; + code++; + k++; + } + code <<= 1; + si++; + } + } + + /// See figure F.15 + private void GenerateDecoderTables() + { + short bitcount = 0; + for (int i = 0; i < 16; i++) + { + if (bits[i] != 0) + valptr[i] = bitcount; + for (int j = 0; j < bits[i]; j++) + { + if (huffcode[j + bitcount] < mincode[i] || mincode[i] == -1) + mincode[i] = huffcode[j + bitcount]; + + if (huffcode[j + bitcount] > maxcode[i]) + maxcode[i] = huffcode[j + bitcount]; + } + if (mincode[i] != -1) + valptr[i] = (short)(valptr[i] - mincode[i]); + bitcount += bits[i]; + } + } + + /// Figure F.12 + public static int Extend(int diff, int t) + { + // here we use bitshift to implement 2^ ... + // NOTE: Math.Pow returns 0 for negative powers, which occassionally happen here! + + int Vt = 1 << t - 1; + // WAS: int Vt = (int)Math.Pow(2, (t - 1)); + + if (diff < Vt) + { + Vt = (-1 << t) + 1; + diff = diff + Vt; + } + return diff; + } + + /// Figure F.16 - Reads the huffman code bit-by-bit. 
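// Illustrative worked example (not upstream FJCore code): Extend() above maps a
// t-bit magnitude back to a signed coefficient difference per Figure F.12. For
// t = 3 the raw values 0..7 decode to -7, -6, -5, -4, 4, 5, 6, 7, e.g.
//
//   int a = HuffmanTable.Extend(2, 3);  // Vt = 1 << 2 = 4; 2 < 4, so 2 + ((-1 << 3) + 1) = -5
//   int b = HuffmanTable.Extend(6, 3);  // 6 >= 4, so the value is returned unchanged: 6
//
// In other words, raw codes below 2^(t-1) fold onto the negative range while the
// rest are taken as-is.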
+ /*public int Decode(JPEGBinaryReader JPEGStream) + { + int i = 0; + short code = (short)JPEGStream.ReadBits(1); + while (code > maxcode[i]) + { + i++; + code <<= 1; + code |= (short)JPEGStream.ReadBits(1); + } + int val = huffval[code + (valptr[i])]; + if (val < 0) + val = 256 + val; + return val; + }*/ + + /// + /// HuffmanBlockEncoder run length encodes and Huffman encodes the quantized data. + /// + internal void HuffmanBlockEncoder(Stream outStream, int[] zigzag, int prec, int DCcode, int ACcode) + { + int temp, temp2, nbits, k, r, i; + + NumOfDCTables = 2; + NumOfACTables = 2; + + // The DC portion + + temp = temp2 = zigzag[0] - prec; + if (temp < 0) + { + temp = -temp; + temp2--; + } + nbits = 0; + while (temp != 0) + { + nbits++; + temp >>= 1; + } + // if (nbits > 11) nbits = 11; + bufferIt(outStream, + DC_matrix[DCcode][nbits, 0], + DC_matrix[DCcode][nbits, 1]); + + // The arguments in bufferIt are code and size. + if (nbits != 0) + { + bufferIt(outStream, temp2, nbits); + } + + // The AC portion + + r = 0; + + for (k = 1; k < 64; k++) + { + if ((temp = zigzag[ ZigZag.ZigZagMap[k] ]) == 0) + { + r++; + } + else + { + while (r > 15) + { + bufferIt(outStream, + AC_matrix[ACcode][0xF0, 0], + AC_matrix[ACcode][0xF0, 1]); + + r -= 16; + } + temp2 = temp; + if (temp < 0) + { + temp = -temp; + temp2--; + } + nbits = 1; + while ((temp >>= 1) != 0) + { + nbits++; + } + i = (r << 4) + nbits; + bufferIt(outStream, + AC_matrix[ACcode][i, 0], + AC_matrix[ACcode][i, 1]); + bufferIt(outStream, temp2, nbits); + + r = 0; + } + } + + if (r > 0) + { + bufferIt(outStream, + AC_matrix[ACcode][0, 0], + AC_matrix[ACcode][0, 1]); + } + } + + /// + /// Uses an integer long (32 bits) buffer to store the Huffman encoded bits + /// and sends them to outStream by the byte. + /// + void bufferIt(Stream outStream, int code, int size) + { + int PutBuffer = code; + int PutBits = bufferPutBits; + + PutBuffer &= (1 << size) - 1; + PutBits += size; + PutBuffer <<= 24 - PutBits; + PutBuffer |= bufferPutBuffer; + + while (PutBits >= 8) + { + int c = ((PutBuffer >> 16) & 0xFF); + outStream.WriteByte((byte)c); + + // FF must be escaped + if (c == 0xFF) outStream.WriteByte(0); + + PutBuffer <<= 8; + PutBits -= 8; + } + bufferPutBuffer = PutBuffer; + bufferPutBits = PutBits; + + } + + public void FlushBuffer(Stream outStream) + { + int PutBuffer = bufferPutBuffer; + int PutBits = bufferPutBits; + while (PutBits >= 8) + { + int c = ((PutBuffer >> 16) & 0xFF); + outStream.WriteByte((byte)c); + + // FF must be escaped + if (c == 0xFF) outStream.WriteByte(0); + + PutBuffer <<= 8; + PutBits -= 8; + } + if (PutBits > 0) + { + int c = ((PutBuffer >> 16) & 0xFF); + outStream.WriteByte((byte)c); + } + } + + + /// + /// Initialisation of the Huffman codes for Luminance and Chrominance. + /// This code results in the same tables created in the IJG Jpeg-6a + /// library. 
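// Illustrative worked example (not upstream FJCore code): for the standard DC
// luminance table the per-length counts are { 0, 1, 5, 1, 1, 1, 1, 1, 1, 0, ... },
// so the canonical code assignment performed below produces
//   value 0       -> 00          (2 bits)
//   values 1..5   -> 010 .. 110  (3 bits)
//   value 6       -> 1110        (4 bits)
//   value 7       -> 11110       (5 bits)   ... one extra bit per further value.
// Whenever the code length grows, the running code is doubled (code <<= 1) before
// numbering continues, exactly as described in Annex C of ITU-T T.81.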
+ /// + public void initHuf() + { + DC_matrix0 = new int[12, 2]; + DC_matrix1 = new int[12, 2]; + AC_matrix0 = new int[255, 2]; + AC_matrix1 = new int[255, 2]; + DC_matrix = new int[2][,]; + AC_matrix = new int[2][,]; + int p, l, i, lastp, si, code; + int[] huffsize = new int[257]; + int[] huffcode = new int[257]; + + short[] bitsDCchrominance = JpegHuffmanTable.StdDCChrominance.Lengths; + short[] bitsACchrominance = JpegHuffmanTable.StdACChrominance.Lengths; + short[] bitsDCluminance = JpegHuffmanTable.StdDCLuminance.Lengths; + short[] bitsACluminance = JpegHuffmanTable.StdACLuminance.Lengths; + + + short[] valDCchrominance = JpegHuffmanTable.StdDCChrominance.Values; + short[] valACchrominance = JpegHuffmanTable.StdACChrominance.Values; + short[] valDCluminance = JpegHuffmanTable.StdDCLuminance.Values; + short[] valACluminance = JpegHuffmanTable.StdACLuminance.Values; + + + /* + * init of the DC values for the chrominance + * [,0] is the code [,1] is the number of bit + */ + + p = 0; + for (l = 0; l < 16; l++) + { + for (i = 1; i <= bitsDCchrominance[l]; i++) + { + huffsize[p++] = l+1; + } + } + + huffsize[p] = 0; + lastp = p; + + code = 0; + si = huffsize[0]; + p = 0; + while (huffsize[p] != 0) + { + while (huffsize[p] == si) + { + huffcode[p++] = code; + code++; + } + code <<= 1; + si++; + } + + for (p = 0; p < lastp; p++) + { + DC_matrix1[valDCchrominance[p], 0] = huffcode[p]; + DC_matrix1[valDCchrominance[p], 1] = huffsize[p]; + } + + /* + * Init of the AC hufmann code for the chrominance + * matrix [,,0] is the code & matrix[,,1] is the number of bit needed + */ + + p = 0; + for (l = 0; l < 16; l++) + { + for (i = 1; i <= bitsACchrominance[l]; i++) + { + huffsize[p++] = l+1; + } + } + huffsize[p] = 0; + lastp = p; + + code = 0; + si = huffsize[0]; + p = 0; + while (huffsize[p] != 0) + { + while (huffsize[p] == si) + { + huffcode[p++] = code; + code++; + } + code <<= 1; + si++; + } + + for (p = 0; p < lastp; p++) + { + AC_matrix1[valACchrominance[p], 0] = huffcode[p]; + AC_matrix1[valACchrominance[p], 1] = huffsize[p]; + } + + /* + * init of the DC values for the luminance + * [,0] is the code [,1] is the number of bit + */ + p = 0; + for (l = 0; l < 16; l++) + { + for (i = 1; i <= bitsDCluminance[l]; i++) + { + huffsize[p++] = l+1; + } + } + huffsize[p] = 0; + lastp = p; + + code = 0; + si = huffsize[0]; + p = 0; + while (huffsize[p] != 0) + { + while (huffsize[p] == si) + { + huffcode[p++] = code; + code++; + } + code <<= 1; + si++; + } + + for (p = 0; p < lastp; p++) + { + DC_matrix0[valDCluminance[p], 0] = huffcode[p]; + DC_matrix0[valDCluminance[p], 1] = huffsize[p]; + } + + /* + * Init of the AC hufmann code for luminance + * matrix [,,0] is the code & matrix[,,1] is the number of bit + */ + + p = 0; + for (l = 0; l < 16; l++) + { + for (i = 1; i <= bitsACluminance[l]; i++) + { + huffsize[p++] = l+1; + } + } + huffsize[p] = 0; + lastp = p; + + code = 0; + si = huffsize[0]; + p = 0; + while (huffsize[p] != 0) + { + while (huffsize[p] == si) + { + huffcode[p++] = code; + code++; + } + code <<= 1; + si++; + } + for (int q = 0; q < lastp; q++) + { + AC_matrix0[valACluminance[q], 0] = huffcode[q]; + AC_matrix0[valACluminance[q], 1] = huffsize[q]; + } + + DC_matrix[0] = DC_matrix0; + DC_matrix[1] = DC_matrix1; + AC_matrix[0] = AC_matrix0; + AC_matrix[1] = AC_matrix1; + } + + + } +} diff --git a/debian/missing-sources/plupload/csharp/Plupload/FJCore/Decoder/JpegComponent.cs b/debian/missing-sources/plupload/csharp/Plupload/FJCore/Decoder/JpegComponent.cs new file mode 100644 index 
0000000..4d8c8e0 --- /dev/null +++ b/debian/missing-sources/plupload/csharp/Plupload/FJCore/Decoder/JpegComponent.cs @@ -0,0 +1,702 @@ +/// Copyright (c) 2008 Jeffrey Powers for Fluxcapacity Open Source. +/// Under the MIT License, details: License.txt. + +using System; +using System.Collections.Generic; +using System.Text; +using FluxJpeg.Core.IO; +using System.Reflection.Emit; +using System.Diagnostics; + +namespace FluxJpeg.Core.Decoder +{ + + internal class JpegComponent + { + public byte factorH, factorV, component_id, quant_id; + public int width = 0, height = 0; + public HuffmanTable ACTable; + public HuffmanTable DCTable; + + public int[] QuantizationTable { + set + { + quantizationTable = value; + _quant = EmitQuantize(); + } + } + private int[] quantizationTable; + + public float previousDC = 0; + private JpegScan parent; + + // Current MCU block + float[,][] scanMCUs = null; + + private List scanData = new List(); + + public int BlockCount { get { return scanData.Count; } } + + private List scanDecoded = new List(); + + public int spectralStart, spectralEnd; + public int successiveLow; + + public JpegComponent(JpegScan parentScan, byte id, byte factorHorizontal, byte factorVertical, + byte quantizationID, byte colorMode) + { + parent = parentScan; + + /* Set default tables in case they're not provided. J. Powers */ + // TODO: only gen if needed + + if (colorMode == JPEGFrame.JPEG_COLOR_YCbCr) + { + if (id == 1) // Luminance + { + ACTable = new HuffmanTable(JpegHuffmanTable.StdACLuminance); + DCTable = new HuffmanTable(JpegHuffmanTable.StdDCLuminance); + } + else + { + ACTable = new HuffmanTable( JpegHuffmanTable.StdACChrominance); + DCTable = new HuffmanTable( JpegHuffmanTable.StdACLuminance); + } + } + + component_id = id; + + factorH = factorHorizontal; + factorV = factorVertical; + + quant_id = quantizationID; + } + + /// + /// If a restart marker is found with too little of an MCU count (i.e. our + /// Restart Interval is 63 and we have 61 we copy the last MCU until it's full) + /// + public void padMCU(int index, int length) + { + scanMCUs = new float[factorH, factorV][]; + + for(int n = 0; n < length; n++) + { + if (scanData.Count >= (index + length)) continue; + + for (int i = 0; i < factorH; i++) + for (int j = 0; j < factorV; j++) + scanMCUs[i, j] = (float[])scanData[index - 1][i,j].Clone(); + + scanData.Add(scanMCUs); + } + + } + + /// + /// Reset the interval by setting the previous DC value + /// + public void resetInterval() + { + previousDC = 0; + } + + private delegate void QuantizeDel(float[] arr); + private QuantizeDel _quant = null; + + private QuantizeDel EmitQuantize() + { + Type[] args = { typeof(float[]) }; + + DynamicMethod quantizeMethod = new DynamicMethod("Quantize", + null, // no return type + args); // input array + + ILGenerator il = quantizeMethod.GetILGenerator(); + + for (int i = 0; i < quantizationTable.Length; i++) + { + float mult = (float)quantizationTable[i]; + + // Sz Stack: + il.Emit(OpCodes.Ldarg_0); // 1 {arr} + il.Emit(OpCodes.Ldc_I4_S, (short)i); // 3 {arr,i} + il.Emit(OpCodes.Ldarg_0); // 1 {arr,i,arr} + il.Emit(OpCodes.Ldc_I4_S, (short)i); // 3 {arr,i,arr,i} + il.Emit(OpCodes.Ldelem_R4); // 1 {arr,i,arr[i]} + il.Emit(OpCodes.Ldc_R4, mult); // 5 {arr,i,arr[i],mult} + il.Emit(OpCodes.Mul); // 1 {arr,i,arr[i]*mult} + il.Emit(OpCodes.Stelem_R4); // 1 {} + + } + + il.Emit(OpCodes.Ret); + + return (QuantizeDel)quantizeMethod.CreateDelegate(typeof(QuantizeDel)); + } + + /// + /// Run the Quantization backward method on all of the block data. 
+ /// + public void quantizeData() + { + for (int i = 0; i < scanData.Count; i++) + { + for(int v = 0; v < factorV; v++) + for (int h = 0; h < factorH; h++) + { + // Dynamic IL method + _quant(scanData[i][h, v]); + + // Old technique + //float[] toQuantize = scanData[i][h, v]; + //for (int j = 0; j < 64; j++) toQuantize[j] *= quantizationTable[j]; + } + } + + } + + public void setDCTable(JpegHuffmanTable table) + { + DCTable = new HuffmanTable(table); + } + + public void setACTable(JpegHuffmanTable table) + { + ACTable = new HuffmanTable(table); + } + + DCT _dct = new DCT(); + + /// + /// Run the Inverse DCT method on all of the block data + /// + public void idctData() + { + float[] unZZ = new float[64]; + float[] toDecode = null; + + for (int i = 0; i < scanData.Count; i++) + { + for (int v = 0; v < factorV; v++) + for (int h = 0; h < factorH; h++) + { + toDecode = scanData[i][h, v]; + ZigZag.UnZigZag(toDecode, unZZ); + //FJCore.Profiling.IDCTWatch.Start(); + scanDecoded.Add(_dct.FastIDCT(unZZ)); + //FJCore.Profiling.IDCTWatch.Stop(); + } + } + } + + private int factorUpV { get { return parent.MaxV / factorV; } } + private int factorUpH { get { return parent.MaxH / factorH; } } + + + /// + /// Stretches components as needed to normalize the size of all components. + /// For example, in a 2x1 (4:2:2) sequence, the Cr and Cb channels will be + /// scaled vertically by a factor of 2. + /// + public void scaleByFactors( BlockUpsamplingMode mode ) + { + int factorUpVertical = factorUpV, + factorUpHorizontal = factorUpH; + + if (factorUpVertical == 1 && factorUpHorizontal == 1) return; + + for (int i = 0; i < scanDecoded.Count; i++) + { + byte[,] src = scanDecoded[i]; + + int oldV = src.GetLength(0), + oldH = src.GetLength(1), + newV = oldV * factorUpVertical, + newH = oldH * factorUpHorizontal; + + byte[,] dest = new byte[newV, newH]; + + switch (mode) + { + case BlockUpsamplingMode.BoxFilter: + #region Upsampling by repeating values + /* Perform scaling (Box filter) */ + for (int u = 0; u < newH; u++) + { + int src_u = u / factorUpHorizontal; + for (int v = 0; v < newV; v++) + { + int src_v = v / factorUpVertical; + dest[v, u] = src[src_v, src_u]; + } + } + #endregion + break; + + case BlockUpsamplingMode.Interpolate: + #region Upsampling by interpolation + + for (int u = 0; u < newH; u++) + { + for (int v = 0; v < newV; v++) + { + int val = 0; + + for (int x = 0; x < factorUpHorizontal; x++) + { + int src_u = (u + x) / factorUpHorizontal; + if (src_u >= oldH) src_u = oldH - 1; + + for (int y = 0; y < factorUpVertical; y++) + { + int src_v = (v + y) / factorUpVertical; + + if (src_v >= oldV) src_v = oldV - 1; + + val += src[src_v, src_u]; + } + } + + dest[v, u] = (byte)(val / (factorUpHorizontal * factorUpVertical)); + } + } + + #endregion + break; + + default: + throw new ArgumentException("Upsampling mode not supported."); + } + + scanDecoded[i] = dest; + } + + } + + + public void writeBlock(byte[][,] raster, byte[,] data, + int compIndex, int x, int y) + { + int w = raster[0].GetLength(0), + h = raster[0].GetLength(1); + + byte[,] comp = raster[compIndex]; + + // Blocks may spill over the frame so we bound by the frame size + int yMax = data.GetLength(0); if ((y + yMax) > h) yMax = h - y; + int xMax = data.GetLength(1); if ((x + xMax) > w) xMax = w - x; + + for (int yIndex = 0; yIndex < yMax; yIndex++) + { + for (int xIndex = 0; xIndex < xMax; xIndex++) + { + comp[x + xIndex, y + yIndex] = data[yIndex, xIndex]; + } + } + } + + public void writeDataScaled(byte[][,] raster, int 
componentIndex, BlockUpsamplingMode mode) + { + int x = 0, y = 0, lastblockheight = 0, incrementblock = 0; + + int blockIdx = 0; + + int w = raster[0].GetLength(0), + h = raster[0].GetLength(1); + + // Keep looping through all of the blocks until there are no more. + while (blockIdx < scanDecoded.Count) + { + int blockwidth = 0; + int blockheight = 0; + + if (x >= w) { x = 0; y += incrementblock; } + + // Loop through the horizontal component blocks of the MCU first + // then for each horizontal line write out all of the vertical + // components + for (int factorVIndex = 0; factorVIndex < factorV; factorVIndex++) + { + blockwidth = 0; + + for (int factorHIndex = 0; factorHIndex < factorH; factorHIndex++) + { + // Captures the width of this block so we can increment the X coordinate + byte[,] blockdata = scanDecoded[blockIdx++]; + + // Writes the data at the specific X and Y coordinate of this component + writeBlockScaled(raster, blockdata, componentIndex, x, y, mode); + + blockwidth += blockdata.GetLength(1) * factorUpH; + x += blockdata.GetLength(1) * factorUpH; + blockheight = blockdata.GetLength(0) * factorUpV; + } + + y += blockheight; + x -= blockwidth; + lastblockheight += blockheight; + } + y -= lastblockheight; + incrementblock = lastblockheight; + lastblockheight = 0; + x += blockwidth; + } + } + + private void writeBlockScaled(byte[][,] raster, byte[,] blockdata, int compIndex, int x, int y, BlockUpsamplingMode mode) + { + int w = raster[0].GetLength(0), + h = raster[0].GetLength(1); + + int factorUpVertical = factorUpV, + factorUpHorizontal = factorUpH; + + int oldV = blockdata.GetLength(0), + oldH = blockdata.GetLength(1), + newV = oldV * factorUpVertical, + newH = oldH * factorUpHorizontal; + + byte[,] comp = raster[compIndex]; + + // Blocks may spill over the frame so we bound by the frame size + int yMax = newV; if ((y + yMax) > h) yMax = h - y; + int xMax = newH; if ((x + xMax) > w) xMax = w - x; + + switch (mode) + { + case BlockUpsamplingMode.BoxFilter: + + #region Upsampling by repeating values + + // Special case 1: No scale-up + if (factorUpVertical == 1 && factorUpHorizontal == 1) + { + for (int u = 0; u < xMax; u++) + for (int v = 0; v < yMax; v++) + comp[u + x, y + v] = blockdata[v, u]; + } + // Special case 2: Perform scale-up 4 pixels at a time + else if (factorUpHorizontal == 2 && + factorUpVertical == 2 && + xMax == newH && yMax == newV) + { + for (int src_u = 0; src_u < oldH; src_u++) + { + int bx = src_u * 2 + x; + + for ( int src_v = 0; src_v < oldV; src_v++) + { + byte val = blockdata[src_v, src_u]; + int by = src_v * 2 + y; + + comp[bx, by] = val; + comp[bx, by + 1] = val; + comp[bx + 1, by] = val; + comp[bx + 1, by + 1] = val; + } + } + } + else + { + /* Perform scaling (Box filter) */ + for (int u = 0; u < xMax; u++) + { + int src_u = u / factorUpHorizontal; + for (int v = 0; v < yMax; v++) + { + int src_v = v / factorUpVertical; + comp[u + x, y + v] = blockdata[src_v, src_u]; + } + } + } + + + #endregion + break; + + // JRP 4/7/08 -- This mode is disabled temporarily as it needs to be fixed after + // recent performance tweaks. + // It can produce slightly better (less blocky) decodings. 
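// Illustrative sketch (not upstream FJCore code): the BoxFilter branch above simply
// replicates each source sample factorUpH x factorUpV times. For the common 2x2
// chroma case a stand-alone equivalent (hypothetical helper name) would be:
//
//   static byte[,] BoxUpsample2x2(byte[,] src)
//   {
//       int h = src.GetLength(0), w = src.GetLength(1);
//       var dst = new byte[h * 2, w * 2];
//       for (int v = 0; v < h * 2; v++)
//           for (int u = 0; u < w * 2; u++)
//               dst[v, u] = src[v / 2, u / 2];   // nearest-neighbour replication
//       return dst;
//   }
//
// The special-cased loop above unrolls the same computation into four writes per
// source sample.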
+ + //case BlockUpsamplingMode.Interpolate: + // #region Upsampling by interpolation + // for (int u = 0; u < newH; u++) + // { + // for (int v = 0; v < newV; v++) + // { + // int val = 0; + // for (int x = 0; x < factorUpHorizontal; x++) + // { + // int src_u = (u + x) / factorUpHorizontal; + // if (src_u >= oldH) src_u = oldH - 1; + // for (int y = 0; y < factorUpVertical; y++) + // { + // int src_v = (v + y) / factorUpVertical; + // if (src_v >= oldV) src_v = oldV - 1; + // val += src[src_v, src_u]; + // } + // } + // dest[v, u] = (byte)(val / (factorUpHorizontal * factorUpVertical)); + // } + // } + // #endregion + // break; + + default: + throw new ArgumentException("Upsampling mode not supported."); + } + + } + + + + + internal delegate void DecodeFunction(JPEGBinaryReader jpegReader, float[] zigzagMCU); + public DecodeFunction Decode; + + public void DecodeBaseline(JPEGBinaryReader stream, float[] dest) + { + float dc = decode_dc_coefficient(stream); + decode_ac_coefficients(stream, dest); + dest[0] = dc; + } + + public void DecodeDCFirst(JPEGBinaryReader stream, float[] dest) + { + float[] datablock = new float[64]; + int s = DCTable.Decode(stream); + int r = stream.ReadBits(s); + s = HuffmanTable.Extend(r, s); + s = (int)previousDC + s; + previousDC = s; + + dest[0] = s << successiveLow; + } + + public void DecodeACFirst(JPEGBinaryReader stream, float[] zz) + { + if (stream.eob_run > 0) + { + stream.eob_run--; + return; + } + + for (int k = spectralStart; k <= spectralEnd; k++) + { + int s = ACTable.Decode(stream); + int r = s >> 4; + s &= 15; + + + if (s != 0) + { + k += r; + + r = (int)stream.ReadBits(s); + s = (int)HuffmanTable.Extend(r, s); + zz[k] = s << successiveLow; + } + else + { + if (r != 15) + { + stream.eob_run = 1 << r; + + if (r != 0) + stream.eob_run += stream.ReadBits(r); + + stream.eob_run--; + + break; + } + + k += 15; + } + } + } + + public void DecodeDCRefine(JPEGBinaryReader stream, float[] dest) + { + if (stream.ReadBits(1) == 1) + { + dest[0] = (int)dest[0] | (1 << successiveLow); + } + } + + public void DecodeACRefine(JPEGBinaryReader stream, float[] dest) + { + int p1 = 1 << successiveLow; + int m1 = (-1) << successiveLow; + + int k = spectralStart; + + if (stream.eob_run == 0) + for (; k <= spectralEnd; k++) + { + #region Decode and check S + + int s = ACTable.Decode(stream); + int r = s >> 4; + s &= 15; + + if (s != 0) + { + if (s != 1) + throw new Exception("Decode Error"); + + if (stream.ReadBits(1) == 1) + s = p1; + else + s = m1; + } + else + { + if (r != 15) + { + stream.eob_run = 1 << r; + + if (r > 0) + stream.eob_run += stream.ReadBits(r); + break; + } + + } // if (s != 0) + + #endregion + + // Apply the update + do + { + if (dest[k] != 0) + { + if (stream.ReadBits(1) == 1) + { + if (((int)dest[k] & p1) == 0) + { + if (dest[k] >= 0) + dest[k] += p1; + else + dest[k] += m1; + } + } + } + else + { + if (--r < 0) + break; + } + + k++; + + } while (k <= spectralEnd); + + if( (s != 0) && k < 64) + { + dest[k] = s; + } + } // for k = start ... 
end + + + if (stream.eob_run > 0) + { + for (; k <= spectralEnd; k++) + { + if (dest[k] != 0) + { + if (stream.ReadBits(1) == 1) + { + if (((int)dest[k] & p1) == 0) + { + if (dest[k] >= 0) + dest[k] += p1; + else + dest[k] += m1; + } + } + } + } + + stream.eob_run--; + } + } + + + public void SetBlock(int idx) + { + if (scanData.Count < idx) + throw new Exception("Invalid block ID."); + + // expand the data list + if (scanData.Count == idx) + { + scanMCUs = new float[factorH, factorV][]; + for (int i = 0; i < factorH; i++) + for (int j = 0; j < factorV; j++) + scanMCUs[i, j] = new float[64]; + + scanData.Add(scanMCUs); + } + else // reference an existing block + { + scanMCUs = scanData[idx]; + } + } + + public void DecodeMCU(JPEGBinaryReader jpegReader, int i, int j) + { + Decode(jpegReader, scanMCUs[i,j]); + } + + /// + /// Generated from text on F-22, F.2.2.1 - Huffman decoding of DC + /// coefficients on ISO DIS 10918-1. Requirements and Guidelines. + /// + /// Stream that contains huffman bits + /// DC coefficient + public float decode_dc_coefficient(JPEGBinaryReader JPEGStream) + { + int t = DCTable.Decode(JPEGStream); + float diff = JPEGStream.ReadBits(t); + diff = HuffmanTable.Extend((int)diff, t); + diff = (previousDC + diff); + previousDC = diff; + return diff; + } + + + /// + /// Generated from text on F-23, F.13 - Huffman decoded of AC coefficients + /// on ISO DIS 10918-1. Requirements and Guidelines. + /// + internal void decode_ac_coefficients(JPEGBinaryReader JPEGStream, float[] zz) + { + for (int k = 1; k < 64; k++) + { + int s = ACTable.Decode(JPEGStream); + int r = s >> 4; + s &= 15; + + + if (s != 0) + { + k += r; + + r = (int)JPEGStream.ReadBits(s); + s = (int)HuffmanTable.Extend(r, s); + zz[k] = s; + } + else + { + if (r != 15) + { + //throw new JPEGMarkerFoundException(); + return; + } + k += 15; + } + } + } + } + +} diff --git a/debian/missing-sources/plupload/csharp/Plupload/FJCore/Decoder/JpegDecoder.cs b/debian/missing-sources/plupload/csharp/Plupload/FJCore/Decoder/JpegDecoder.cs new file mode 100644 index 0000000..6e654db --- /dev/null +++ b/debian/missing-sources/plupload/csharp/Plupload/FJCore/Decoder/JpegDecoder.cs @@ -0,0 +1,614 @@ +/// Copyright (c) 2008 Jeffrey Powers for Fluxcapacity Open Source. +/// Under the MIT License, details: License.txt. + +using System; +using System.Collections.Generic; +using System.Linq; +using System.Text; +using System.IO; +using FluxJpeg.Core.IO; +using System.Diagnostics; + +namespace FluxJpeg.Core.Decoder +{ + public enum BlockUpsamplingMode { + /// The simplest upsampling mode. Produces sharper edges. + BoxFilter, + /// Smoother upsampling. May improve color spread for some images. 
+ Interpolate + } + + public class JpegDecodeProgressChangedArgs : EventArgs + { + public bool SizeReady; + public int Width; + public int Height; + + public bool Abort; + public long ReadPosition; // 0 to input stream length + public double DecodeProgress; // 0 to 1.0 + } + + public class JpegDecoder + { + public static long ProgressUpdateByteInterval = 100; + + public event EventHandler DecodeProgressChanged; + private JpegDecodeProgressChangedArgs DecodeProgress = new JpegDecodeProgressChangedArgs(); + + public BlockUpsamplingMode BlockUpsamplingMode { get; set; } + + byte majorVersion, minorVersion; + private enum UnitType { None = 0, Inches = 1, Centimeters = 2 }; + UnitType Units; + ushort XDensity, YDensity; + byte Xthumbnail, Ythumbnail; + byte[] thumbnail; + Image image; + int width; + int height; + + bool progressive = false; + + byte marker; + + /// + /// This decoder expects JFIF 1.02 encoding. + /// + internal const byte MAJOR_VERSION = (byte)1; + internal const byte MINOR_VERSION = (byte)2; + + /// + /// The length of the JFIF field not including thumbnail data. + /// + internal static short JFIF_FIXED_LENGTH = 16; + + /// + /// The length of the JFIF extension field not including extension data. + /// + internal static short JFXX_FIXED_LENGTH = 8; + + private JPEGBinaryReader jpegReader; + + List jpegFrames = new List(); + + JpegHuffmanTable[] dcTables = new JpegHuffmanTable[4]; + JpegHuffmanTable[] acTables = new JpegHuffmanTable[4]; + JpegQuantizationTable[] qTables = new JpegQuantizationTable[4]; + + public JpegDecoder(Stream input) + { + jpegReader = new JPEGBinaryReader(input); + + if (jpegReader.GetNextMarker() != JPEGMarker.SOI) + throw new Exception("Failed to find SOI marker."); + } + + /// + /// Tries to parse the JFIF APP0 header + /// See http://en.wikipedia.org/wiki/JFIF + /// + private bool TryParseJFIF(byte[] data) + { + IO.BinaryReader reader = new IO.BinaryReader(new MemoryStream(data)); + + int length = data.Length + 2; // Data & length + + if (!(length >= JFIF_FIXED_LENGTH)) + return false; // Header's too small. + + byte[] identifier = new byte[5]; + reader.Read(identifier, 0, identifier.Length); + if (identifier[0] != JPEGMarker.JFIF_J + || identifier[1] != JPEGMarker.JFIF_F + || identifier[2] != JPEGMarker.JFIF_I + || identifier[3] != JPEGMarker.JFIF_F + || identifier[4] != JPEGMarker.X00) + return false; // Incorrect bytes + + majorVersion = reader.ReadByte(); + minorVersion = reader.ReadByte(); + if (majorVersion != MAJOR_VERSION + || (majorVersion == MAJOR_VERSION + && minorVersion > MINOR_VERSION)) // changed from < + return false; // Unsupported version + + Units = (UnitType)reader.ReadByte(); + if (Units != UnitType.None && + Units != UnitType.Inches && + Units != UnitType.Centimeters) + return false; // Invalid units + + XDensity = reader.ReadShort(); + YDensity = reader.ReadShort(); + Xthumbnail = reader.ReadByte(); + Ythumbnail = reader.ReadByte(); + + // 3 * for RGB data + int thumbnailLength = 3 * Xthumbnail * Ythumbnail; + if (length > JFIF_FIXED_LENGTH + && thumbnailLength != length - JFIF_FIXED_LENGTH) + return false; // Thumbnail fields invalid + + if (thumbnailLength > 0) + { + thumbnail = new byte[thumbnailLength]; + if (reader.Read(thumbnail, 0, thumbnailLength) != thumbnailLength) + return false; // Thumbnail data was missing! + + } + + return true; + } + + public DecodedJpeg Decode() + { + // The frames in this jpeg are loaded into a list. 
There is + // usually just one frame except in heirarchial progression where + // there are multiple frames. + JPEGFrame frame = null; + + // The restart interval defines how many MCU's we should have + // between the 8-modulo restart marker. The restart markers allow + // us to tell whether or not our decoding process is working + // correctly, also if there is corruption in the image we can + // recover with these restart intervals. (See RSTm DRI). + int resetInterval = 0; + + bool haveMarker = false; + bool foundJFIF = false; + + List headers = new List(); + + // Loop through until there are no more markers to read in, at + // that point everything is loaded into the jpegFrames array and + // can be processed. + while (true) + { + if (DecodeProgress.Abort) return null; + + #region Switch over marker types + switch (marker) + { + case JPEGMarker.APP0: + // APP1 is used for EXIF data + case JPEGMarker.APP1: + // Seldomly, APP2 gets used for extended EXIF, too + case JPEGMarker.APP2: + case JPEGMarker.APP3: + case JPEGMarker.APP4: + case JPEGMarker.APP5: + case JPEGMarker.APP6: + case JPEGMarker.APP7: + case JPEGMarker.APP8: + case JPEGMarker.APP9: + case JPEGMarker.APP10: + case JPEGMarker.APP11: + case JPEGMarker.APP12: + case JPEGMarker.APP13: + case JPEGMarker.APP14: + case JPEGMarker.APP15: + // COM: Comment + case JPEGMarker.COM: + + // Debug.WriteLine(string.Format("Extracting Header, Type={0:X}", marker)); + + JpegHeader header = ExtractHeader(); + + #region Check explicitly for Exif Data + + if (header.Marker == JPEGMarker.APP1 && header.Data.Length >= 6) + { + byte[] d = header.Data; + + if( d[0] == 'E' && + d[1] == 'x' && + d[2] == 'i' && + d[3] == 'f' && + d[4] == 0 && + d[5] == 0) + { + // Exif. Do something? + } + } + + #endregion + + #region Check for Adobe header + + if (header.Data.Length >= 5 && header.Marker == JPEGMarker.APP14) + { + string asText = UTF8Encoding.UTF8.GetString(header.Data, 0, 5); + if (asText == "Adobe") { + // ADOBE HEADER. Do anything? + } + } + + #endregion + + headers.Add(header); + + if (!foundJFIF && marker == JPEGMarker.APP0) + { + foundJFIF = TryParseJFIF(header.Data); + + if (foundJFIF) // Found JFIF... do JFIF extension follow? + { + header.IsJFIF = true; + marker = jpegReader.GetNextMarker(); + + // Yes, they do. + if (marker == JPEGMarker.APP0) + { + header = ExtractHeader(); + headers.Add(header); + } + else // No. Delay processing this one. + haveMarker = true; + } + } + + break; + + case JPEGMarker.SOF0: + case JPEGMarker.SOF2: + + // SOFn Start of Frame Marker, Baseline DCT - This is the start + // of the frame header that defines certain variables that will + // be carried out through the rest of the encoding. Multiple + // frames are used in a hierarchical system, however most JPEG's + // only contain a single frame. + + // Progressive or baseline? + progressive = marker == JPEGMarker.SOF2; + + jpegFrames.Add(new JPEGFrame()); + frame = (JPEGFrame)jpegFrames[jpegFrames.Count - 1]; + frame.ProgressUpdateMethod = new Action(UpdateStreamProgress); + + // Skip the frame length. + jpegReader.ReadShort(); + // Bits percision, either 8 or 12. + frame.setPrecision(jpegReader.ReadByte()); + // Scan lines (height) + frame.ScanLines = jpegReader.ReadShort(); + // Scan samples per line (width) + frame.SamplesPerLine = jpegReader.ReadShort(); + // Number of Color Components (channels). 
+ frame.ComponentCount = jpegReader.ReadByte(); + + DecodeProgress.Height = frame.Height; + DecodeProgress.Width = frame.Width; + DecodeProgress.SizeReady = true; + + if(DecodeProgressChanged != null) + { + DecodeProgressChanged(this, DecodeProgress); + if (DecodeProgress.Abort) return null; + } + + // Add all of the necessary components to the frame. + for (int i = 0; i < frame.ComponentCount; i++) + { + byte compId = jpegReader.ReadByte(); + byte sampleFactors = jpegReader.ReadByte(); + byte qTableId = jpegReader.ReadByte(); + + byte sampleHFactor = (byte)(sampleFactors >> 4); + byte sampleVFactor = (byte)(sampleFactors & 0x0f); + + frame.AddComponent(compId, sampleHFactor, sampleVFactor, qTableId); + } + break; + + case JPEGMarker.DHT: + + // DHT non-SOF Marker - Huffman Table is required for decoding + // the JPEG stream, when we receive a marker we load in first + // the table length (16 bits), the table class (4 bits), table + // identifier (4 bits), then we load in 16 bytes and each byte + // represents the count of bytes to load in for each of the 16 + // bytes. We load this into an array to use later and move on 4 + // huffman tables can only be used in an image. + int huffmanLength = (jpegReader.ReadShort() - 2); + + // Keep looping until we are out of length. + int index = huffmanLength; + + // Multiple tables may be defined within a DHT marker. This + // will keep reading until there are no tables left, most + // of the time there are just one tables. + while (index > 0) + { + // Read the identifier information and class + // information about the Huffman table, then read the + // 16 byte codelength in and read in the Huffman values + // and put it into table info. + byte huffmanInfo = jpegReader.ReadByte(); + byte tableClass = (byte)(huffmanInfo >> 4); + byte huffmanIndex = (byte)(huffmanInfo & 0x0f); + short[] codeLength = new short[16]; + + for (int i = 0; i < codeLength.Length; i++) + codeLength[i] = jpegReader.ReadByte(); + + int huffmanValueLen = 0; + for (int i = 0; i < 16; i++) + huffmanValueLen += codeLength[i]; + index -= (huffmanValueLen + 17); + + short[] huffmanVal = new short[huffmanValueLen]; + for (int i = 0; i < huffmanVal.Length; i++) + { + huffmanVal[i] = jpegReader.ReadByte(); + } + // Assign DC Huffman Table. + if (tableClass == HuffmanTable.JPEG_DC_TABLE) + dcTables[(int)huffmanIndex] = new JpegHuffmanTable(codeLength, huffmanVal); + + // Assign AC Huffman Table. + else if (tableClass == HuffmanTable.JPEG_AC_TABLE) + acTables[(int)huffmanIndex] = new JpegHuffmanTable(codeLength, huffmanVal); + } + break; + + case JPEGMarker.DQT: + + // DQT non-SOF Marker - This defines the quantization + // coeffecients, this allows us to figure out the quality of + // compression and unencode the data. The data is loaded and + // then stored in to an array. + short quantizationLength = (short)(jpegReader.ReadShort() - 2); + for (int j = 0; j < quantizationLength / 65; j++) + { + byte quantSpecs = jpegReader.ReadByte(); + int[] quantData = new int[64]; + if ((byte)(quantSpecs >> 4) == 0) + // Precision 8 bit. + { + for (int i = 0; i < 64; i++) + quantData[i] = jpegReader.ReadByte(); + + } + else if ((byte)(quantSpecs >> 4) == 1) + // Precision 16 bit. 
+ { + for (int i = 0; i < 64; i++) + quantData[i] = jpegReader.ReadShort(); + } + qTables[(int)(quantSpecs & 0x0f)] = new JpegQuantizationTable(quantData); + } + break; + + case JPEGMarker.SOS: + + Debug.WriteLine("Start of Scan (SOS)"); + + + // SOS non-SOF Marker - Start Of Scan Marker, this is where the + // actual data is stored in a interlaced or non-interlaced with + // from 1-4 components of color data, if three components most + // likely a YCrCb model, this is a fairly complex process. + + // Read in the scan length. + ushort scanLen = jpegReader.ReadShort(); + // Number of components in the scan. + byte numberOfComponents = jpegReader.ReadByte(); + byte[] componentSelector = new byte[numberOfComponents]; + + for (int i = 0; i < numberOfComponents; i++) + { + // Component ID, packed byte containing the Id for the + // AC table and DC table. + byte componentID = jpegReader.ReadByte(); + byte tableInfo = jpegReader.ReadByte(); + + int DC = (tableInfo >> 4) & 0x0f; + int AC = (tableInfo) & 0x0f; + + frame.setHuffmanTables(componentID, + acTables[(byte)AC], + dcTables[(byte)DC]); + + + componentSelector[i] = componentID; + } + + byte startSpectralSelection = jpegReader.ReadByte(); + byte endSpectralSelection = jpegReader.ReadByte(); + byte successiveApproximation = jpegReader.ReadByte(); + + #region Baseline JPEG Scan Decoding + + if (!progressive) + { + frame.DecodeScanBaseline(numberOfComponents, componentSelector, resetInterval, jpegReader, ref marker); + haveMarker = true; // use resultant marker for the next switch(..) + } + + #endregion + + #region Progressive JPEG Scan Decoding + + if (progressive) + { + frame.DecodeScanProgressive( + successiveApproximation, startSpectralSelection, endSpectralSelection, + numberOfComponents, componentSelector, resetInterval, jpegReader, ref marker); + + haveMarker = true; // use resultant marker for the next switch(..) + } + + #endregion + + break; + + + case JPEGMarker.DRI: + jpegReader.BaseStream.Seek(2, System.IO.SeekOrigin.Current); + resetInterval = jpegReader.ReadShort(); + break; + + /// Defines the number of lines. (Not usually present) + case JPEGMarker.DNL: + + frame.ScanLines = jpegReader.ReadShort(); + break; + + /// End of Image. Finish the decode. + case JPEGMarker.EOI: + + if (jpegFrames.Count == 0) + { + throw new NotSupportedException("No JPEG frames could be located."); + } + else if (jpegFrames.Count == 1) + { + // Only one frame, JPEG Non-Heirarchial Frame. + byte[][,] raster = Image.CreateRaster(frame.Width, frame.Height, frame.ComponentCount); + + IList components = frame.Scan.Components; + + int totalSteps = components.Count * 3; // Three steps per loop + int stepsFinished = 0; + + for(int i = 0; i < components.Count; i++) + { + JpegComponent comp = components[i]; + + comp.QuantizationTable = qTables[comp.quant_id].Table; + + // 1. Quantize + comp.quantizeData(); + UpdateProgress(++stepsFinished, totalSteps); + + // 2. Run iDCT (expensive) + comp.idctData(); + UpdateProgress(++stepsFinished, totalSteps); + + // 3. Scale the image and write the data to the raster. + comp.writeDataScaled(raster, i, BlockUpsamplingMode); + + UpdateProgress(++stepsFinished, totalSteps); + + // Ensure garbage collection. + comp = null; GC.Collect(); + } + + // Grayscale Color Image (1 Component). + if (frame.ComponentCount == 1) + { + ColorModel cm = new ColorModel() { colorspace = ColorSpace.Gray, Opaque = true }; + image = new Image(cm, raster); + } + // YCbCr Color Image (3 Components). 
+ else if (frame.ComponentCount == 3) + { + ColorModel cm = new ColorModel() { colorspace = ColorSpace.YCbCr, Opaque = true }; + image = new Image(cm, raster); + } + // Possibly CMYK or RGBA ? + else + { + throw new NotSupportedException("Unsupported Color Mode: 4 Component Color Mode found."); + } + + // If needed, convert centimeters to inches. + Func conv = x => + Units == UnitType.Inches ? x : x / 2.54; + + image.DensityX = conv(XDensity); + image.DensityY = conv(YDensity); + + height = frame.Height; + width = frame.Width; + } + else + { + // JPEG Heirarchial Frame + throw new NotSupportedException("Unsupported Codec Type: Hierarchial JPEG"); + } + break; + + // Only SOF0 (baseline) and SOF2 (progressive) are supported by FJCore + case JPEGMarker.SOF1: + case JPEGMarker.SOF3: + case JPEGMarker.SOF5: + case JPEGMarker.SOF6: + case JPEGMarker.SOF7: + case JPEGMarker.SOF9: + case JPEGMarker.SOF10: + case JPEGMarker.SOF11: + case JPEGMarker.SOF13: + case JPEGMarker.SOF14: + case JPEGMarker.SOF15: + throw new NotSupportedException("Unsupported codec type."); + + default: break; // ignore + + } + + #endregion switch over markers + + if (haveMarker) haveMarker = false; + else + { + try + { + marker = jpegReader.GetNextMarker(); + } + catch (System.IO.EndOfStreamException) + { + break; /* done reading the file */ + } + } + } + + DecodedJpeg result = new DecodedJpeg(image, headers); + + return result; + } + + private JpegHeader ExtractHeader() + { + #region Extract the header + + int length = jpegReader.ReadShort() - 2; + byte[] data = new byte[length]; + jpegReader.Read(data, 0, length); + + #endregion + + JpegHeader header = new JpegHeader() + { + Marker = marker, + Data = data + }; + return header; + } + + #region Decode Progress Monitoring + + private void UpdateStreamProgress(long StreamPosition) + { + if (DecodeProgressChanged != null) + { + DecodeProgress.ReadPosition = StreamPosition; + DecodeProgressChanged(this, DecodeProgress); + }; + } + + private void UpdateProgress(int stepsFinished, int stepsTotal) + { + if (DecodeProgressChanged != null) + { + DecodeProgress.DecodeProgress = (double)stepsFinished / stepsTotal; + DecodeProgressChanged(this, DecodeProgress); + }; + } + + #endregion + + + } +} diff --git a/debian/missing-sources/plupload/csharp/Plupload/FJCore/Decoder/JpegFrame.cs b/debian/missing-sources/plupload/csharp/Plupload/FJCore/Decoder/JpegFrame.cs new file mode 100644 index 0000000..02f8027 --- /dev/null +++ b/debian/missing-sources/plupload/csharp/Plupload/FJCore/Decoder/JpegFrame.cs @@ -0,0 +1,283 @@ +/// Copyright (c) 2008 Jeffrey Powers for Fluxcapacity Open Source. +/// Under the MIT License, details: License.txt. 
+ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Text; +using FluxJpeg.Core.IO; + +namespace FluxJpeg.Core.Decoder +{ + internal class JPEGFrame + { + public static byte JPEG_COLOR_GRAY = 1; + public static byte JPEG_COLOR_RGB = 2; + public static byte JPEG_COLOR_YCbCr = 3; + public static byte JPEG_COLOR_CMYK = 4; + + public byte precision = 8; + public byte colorMode = JPEGFrame.JPEG_COLOR_YCbCr; + + public ushort Width { get; private set; } + public ushort Height { get; private set; } + + public JpegScan Scan = new JpegScan(); + + public Action ProgressUpdateMethod = null; + + public void AddComponent(byte componentID, byte sampleHFactor, byte sampleVFactor, + byte quantizationTableID) + { + Scan.AddComponent(componentID, sampleHFactor, sampleVFactor, quantizationTableID, colorMode); + } + + public void setPrecision(byte data) { precision = data; } + + public ushort ScanLines { set { Height = value; } } + public ushort SamplesPerLine { set { Width = value; } } + + public byte ColorMode { get { + return ComponentCount == 1 ? + JPEGFrame.JPEG_COLOR_GRAY : + JPEGFrame.JPEG_COLOR_YCbCr; + + } + } + + public byte ComponentCount { get ; set; } + + public void setHuffmanTables(byte componentID, JpegHuffmanTable ACTable, JpegHuffmanTable DCTable) + { + JpegComponent comp = Scan.GetComponentById(componentID); + + if(DCTable != null) comp.setDCTable(DCTable); + if(ACTable != null) comp.setACTable(ACTable); + } + + public void DecodeScanBaseline(byte numberOfComponents, byte[] componentSelector, int resetInterval, JPEGBinaryReader jpegReader, ref byte marker) + { + // Set the decode function for all the components + for (int compIndex = 0; compIndex < numberOfComponents; compIndex++) + { + JpegComponent comp = Scan.GetComponentById(componentSelector[compIndex]); + comp.Decode = comp.DecodeBaseline; + } + + DecodeScan(numberOfComponents, componentSelector, resetInterval, jpegReader, ref marker); + } + + private int mcus_per_row(JpegComponent c) + { + return (((( Width * c.factorH ) + ( Scan.MaxH - 1)) / Scan.MaxH) + 7) / 8; + } + + private void DecodeScan(byte numberOfComponents, byte[] componentSelector, int resetInterval, JPEGBinaryReader jpegReader, ref byte marker) + { + //TODO: not necessary + jpegReader.eob_run = 0; + + int mcuIndex = 0; + int mcuTotalIndex = 0; + + // This loops through until a MarkerTagFound exception is + // found, if the marker tag is a RST (Restart Marker) it + // simply skips it and moves on this system does not handle + // corrupt data streams very well, it could be improved by + // handling misplaced restart markers. + + int h = 0, v = 0; + int x = 0; + + long lastPosition = jpegReader.BaseStream.Position; + + //TODO: replace this with a loop which knows how much data to expect + while (true) + { + #region Inform caller of decode progress + + if (ProgressUpdateMethod != null) + { + if (jpegReader.BaseStream.Position >= lastPosition + JpegDecoder.ProgressUpdateByteInterval) + { + lastPosition = jpegReader.BaseStream.Position; + ProgressUpdateMethod(lastPosition); + } + } + + #endregion + + try + { + // Loop though capturing MCU, instruct each + // component to read in its necessary count, for + // scaling factors the components automatically + // read in how much they need + + // Sec A.2.2 from CCITT Rec. 
T.81 (1992 E) + bool interleaved = !(numberOfComponents == 1); + + if (!interleaved) + { + JpegComponent comp = Scan.GetComponentById(componentSelector[0]); + + comp.SetBlock(mcuIndex); + + comp.DecodeMCU(jpegReader, h, v); + + int mcus_per_line = mcus_per_row(comp); + int blocks_per_line = (int) Math.Ceiling((double)this.Width / (8 * comp.factorH)); + + + // TODO: Explain the non-interleaved scan ------ + + h++; x++; + + if (h == comp.factorH) + { + h = 0; mcuIndex++; + } + + if( (x % mcus_per_line) == 0) + { + x = 0; + v++; + + if (v == comp.factorV) + { + if (h != 0) { mcuIndex++; h = 0; } + v = 0; + } + else + { + mcuIndex -= blocks_per_line; + + // we were mid-block + if (h != 0) { mcuIndex++; h = 0; } + } + } + + // ----------------------------------------------- + + } + else // Components are interleaved + { + for (int compIndex = 0; compIndex < numberOfComponents; compIndex++) + { + JpegComponent comp = Scan.GetComponentById(componentSelector[compIndex]); + comp.SetBlock(mcuTotalIndex); + + for (int j = 0; j < comp.factorV; j++) + for (int i = 0; i < comp.factorH; i++) + { + comp.DecodeMCU(jpegReader, i, j); + } + } + + mcuIndex++; + mcuTotalIndex++; + } + } + // We've found a marker, see if the marker is a restart + // marker or just the next marker in the stream. If + // it's the next marker in the stream break out of the + // while loop, if it's just a restart marker skip it + catch (JPEGMarkerFoundException ex) + { + marker = ex.Marker; + + // Handle JPEG Restart Markers, this is where the + // count of MCU's per interval is compared with + // the count actually obtained, if it's short then + // pad on some MCU's ONLY for components that are + // greater than one. Also restart the DC prediction + // to zero. + if (marker == JPEGMarker.RST0 + || marker == JPEGMarker.RST1 + || marker == JPEGMarker.RST2 + || marker == JPEGMarker.RST3 + || marker == JPEGMarker.RST4 + || marker == JPEGMarker.RST5 + || marker == JPEGMarker.RST6 + || marker == JPEGMarker.RST7) + { + for (int compIndex = 0; compIndex < numberOfComponents; compIndex++) + { + JpegComponent comp = Scan.GetComponentById(componentSelector[compIndex]); + if (compIndex > 1) + comp.padMCU(mcuTotalIndex, resetInterval - mcuIndex); + comp.resetInterval(); + } + + mcuTotalIndex += (resetInterval - mcuIndex); + mcuIndex = 0; + } + else + { + break; // We're at the end of our scan, exit out. 
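The catch block above compares the found marker against the eight restart markers one at a time. Since RST0 through RST7 occupy the contiguous marker codes 0xD0 through 0xD7, an equivalent range test is possible; the helper below is a sketch and assumes the JPEGMarker constants carry those standard values:

// Equivalent range test for the eight restart markers; RSTn is 0xD0 + n in the JPEG
// marker space, so the constants are contiguous.
internal static class RestartMarkerSketch
{
    internal static bool IsRestartMarker(byte marker)
    {
        return marker >= JPEGMarker.RST0 && marker <= JPEGMarker.RST7;
    }
}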
+ } + } + } + + } + + public void DecodeScanProgressive(byte successiveApproximation, byte startSpectralSelection, byte endSpectralSelection, + byte numberOfComponents, byte[] componentSelector, int resetInterval, JPEGBinaryReader jpegReader, ref byte marker) + { + + byte successiveHigh = (byte)(successiveApproximation >> 4); + byte successiveLow = (byte)(successiveApproximation & 0x0f); + + if ((startSpectralSelection > endSpectralSelection) || (endSpectralSelection > 63)) + throw new Exception("Bad spectral selection."); + + bool dcOnly = startSpectralSelection == 0; + bool refinementScan = (successiveHigh != 0); + + if (dcOnly) // DC scan + { + if (endSpectralSelection != 0) + throw new Exception("Bad spectral selection for DC only scan."); + } + else // AC scan + { + if (numberOfComponents > 1) + throw new Exception("Too many components for AC scan!"); + } + + // Set the decode function for all the components + // TODO: set this for the scan and let the component figure it out + for (int compIndex = 0; compIndex < numberOfComponents; compIndex++) + { + JpegComponent comp = Scan.GetComponentById(componentSelector[compIndex]); + + comp.successiveLow = successiveLow; + + if (dcOnly) + { + if (refinementScan) // DC refine + comp.Decode = comp.DecodeDCRefine; + else // DC first + comp.Decode = comp.DecodeDCFirst; + } + else + { + comp.spectralStart = startSpectralSelection; + comp.spectralEnd = endSpectralSelection; + + if (refinementScan) // AC refine + comp.Decode = comp.DecodeACRefine; + else // AC first + comp.Decode = comp.DecodeACFirst; + } + } + + DecodeScan(numberOfComponents, componentSelector, resetInterval, jpegReader, ref marker); + + } + + + + } + +} diff --git a/debian/missing-sources/plupload/csharp/Plupload/FJCore/Decoder/JpegHuffmanTable.cs b/debian/missing-sources/plupload/csharp/Plupload/FJCore/Decoder/JpegHuffmanTable.cs new file mode 100644 index 0000000..cb4b8f1 --- /dev/null +++ b/debian/missing-sources/plupload/csharp/Plupload/FJCore/Decoder/JpegHuffmanTable.cs @@ -0,0 +1,183 @@ +/// Copyright (c) 2008 Jeffrey Powers for Fluxcapacity Open Source. +/// Under the MIT License, details: License.txt. + +using System; +using System.Linq; + +namespace FluxJpeg.Core +{ + /// + /// The JPEGHuffmanTable class represents a Huffman table read from a + /// JPEG image file. The standard JPEG AC and DC chrominance and + /// luminance values are provided as static fields. 
+ /// + internal class JpegHuffmanTable + { + private short[] lengths; + private short[] values; + + #region Standard JPEG Huffman Tables + + public static JpegHuffmanTable StdACChrominance = + new JpegHuffmanTable(new short[] { 0, 2, 1, 2, 4, 4, 3, 4, 7, 5, + 4, 4, 0, 1, 2, 0x77 }, + new short[] { 0x00, 0x01, 0x02, 0x03, 0x11, + 0x04, 0x05, 0x21, 0x31, 0x06, + 0x12, 0x41, 0x51, 0x07, 0x61, + 0x71, 0x13, 0x22, 0x32, 0x81, + 0x08, 0x14, 0x42, 0x91, 0xa1, + 0xb1, 0xc1, 0x09, 0x23, 0x33, + 0x52, 0xf0, 0x15, 0x62, 0x72, + 0xd1, 0x0a, 0x16, 0x24, 0x34, + 0xe1, 0x25, 0xf1, 0x17, 0x18, + 0x19, 0x1a, 0x26, 0x27, 0x28, + 0x29, 0x2a, 0x35, 0x36, 0x37, + 0x38, 0x39, 0x3a, 0x43, 0x44, + 0x45, 0x46, 0x47, 0x48, 0x49, + 0x4a, 0x53, 0x54, 0x55, 0x56, + 0x57, 0x58, 0x59, 0x5a, 0x63, + 0x64, 0x65, 0x66, 0x67, 0x68, + 0x69, 0x6a, 0x73, 0x74, 0x75, + 0x76, 0x77, 0x78, 0x79, 0x7a, + 0x82, 0x83, 0x84, 0x85, 0x86, + 0x87, 0x88, 0x89, 0x8a, 0x92, + 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0xa2, 0xa3, + 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, + 0xa9, 0xaa, 0xb2, 0xb3, 0xb4, + 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, + 0xba, 0xc2, 0xc3, 0xc4, 0xc5, + 0xc6, 0xc7, 0xc8, 0xc9, 0xca, + 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, + 0xd7, 0xd8, 0xd9, 0xda, 0xe2, + 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, + 0xe8, 0xe9, 0xea, 0xf2, 0xf3, + 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, + 0xf9, 0xfa }, false); + + public static JpegHuffmanTable StdACLuminance = + new JpegHuffmanTable(new short[] { 0, 2, 1, 3, 3, 2, 4, 3, 5, 5, + 4, 4, 0, 0, 1, 0x7d }, + new short[] { 0x01, 0x02, 0x03, 0x00, 0x04, + 0x11, 0x05, 0x12, 0x21, 0x31, + 0x41, 0x06, 0x13, 0x51, 0x61, + 0x07, 0x22, 0x71, 0x14, 0x32, + 0x81, 0x91, 0xa1, 0x08, 0x23, + 0x42, 0xb1, 0xc1, 0x15, 0x52, + 0xd1, 0xf0, 0x24, 0x33, 0x62, + 0x72, 0x82, 0x09, 0x0a, 0x16, + 0x17, 0x18, 0x19, 0x1a, 0x25, + 0x26, 0x27, 0x28, 0x29, 0x2a, + 0x34, 0x35, 0x36, 0x37, 0x38, + 0x39, 0x3a, 0x43, 0x44, 0x45, + 0x46, 0x47, 0x48, 0x49, 0x4a, + 0x53, 0x54, 0x55, 0x56, 0x57, + 0x58, 0x59, 0x5a, 0x63, 0x64, + 0x65, 0x66, 0x67, 0x68, 0x69, + 0x6a, 0x73, 0x74, 0x75, 0x76, + 0x77, 0x78, 0x79, 0x7a, 0x83, + 0x84, 0x85, 0x86, 0x87, 0x88, + 0x89, 0x8a, 0x92, 0x93, 0x94, + 0x95, 0x96, 0x97, 0x98, 0x99, + 0x9a, 0xa2, 0xa3, 0xa4, 0xa5, + 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, + 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, + 0xb7, 0xb8, 0xb9, 0xba, 0xc2, + 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, + 0xc8, 0xc9, 0xca, 0xd2, 0xd3, + 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, + 0xd9, 0xda, 0xe1, 0xe2, 0xe3, + 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, + 0xe9, 0xea, 0xf1, 0xf2, 0xf3, + 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, + 0xf9, 0xfa }, false); + + public static JpegHuffmanTable StdDCChrominance = + new JpegHuffmanTable(new short[] { 0, 3, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 0, 0, 0, 0, 0 }, + new short[] { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11 }, false); + + public static JpegHuffmanTable StdDCLuminance = + new JpegHuffmanTable(new short[] { 0, 1, 5, 1, 1, 1, 1, 1, 1, 0, + 0, 0, 0, 0, 0, 0 }, + new short[] { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11 }, false); + + #endregion + + + /// + /// Construct and initialize a Huffman table. Copies are created of + /// the array arguments. lengths[index] stores the number of Huffman + /// values with Huffman codes of length index + 1. The values array + /// stores the Huffman values in order of increasing code length. 
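The lengths/values convention described above is the BITS/HUFFVAL form from Annex C of the JPEG standard, and the codes themselves are canonical, so they can be regenerated from the two arrays alone. For StdDCLuminance, for example, symbol 0 receives the 2-bit code 00 and symbols 1 through 5 receive the 3-bit codes 010 through 110. A sketch of that expansion; FJCore's own decode tables are built elsewhere (HuffmanTable.cs), and the names here are illustrative:

// Illustrative only: regenerates the canonical Huffman codes implied by a
// (lengths, values) pair, following Annex C. codes[k] and codeSizes[k] describe values[k].
internal static class CanonicalCodesSketch
{
    internal static void Build(short[] lengths, short[] values,
                               out int[] codes, out int[] codeSizes)
    {
        codes = new int[values.Length];
        codeSizes = new int[values.Length];

        int code = 0;
        int k = 0; // walks 'values', which is sorted by increasing code length

        for (int bits = 1; bits <= lengths.Length; bits++)
        {
            for (int i = 0; i < lengths[bits - 1]; i++)
            {
                codes[k] = code++;
                codeSizes[k] = bits;
                k++;
            }
            code <<= 1; // codes of the next length continue from here, one bit longer
        }
    }
}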
+ /// + /// throws ArgumentException if either parameter is null, if + /// lengths.Length > 16 or values.Length > 256, if any value in + /// length or values is negative, or if the parameters do not + /// describe a valid Huffman table + /// + /// an array of Huffman code lengths + /// a sorted array of Huffman values + public JpegHuffmanTable(short[] lengths, short[] values) + // Create copies of the lengths and values arguments. + : this(checkLengths(lengths), checkValues(values, lengths), true) + { + } + + /// + /// Private constructor that avoids unnecessary copying and argument checking. + /// + /// lengths an array of Huffman code lengths + /// a sorted array of Huffman values + /// true if copies should be created of the given arrays + private JpegHuffmanTable(short[] lengths, short[] values, bool copy) + { + this.lengths = copy ? (short[])lengths.Clone() : lengths; + this.values = copy ? (short[])values.Clone() : values; + } + + private static short[] checkLengths(short[] lengths) + { + if (lengths == null || lengths.Length > 16) + throw new ArgumentException("Length array is null or too long."); + + if(lengths.Any(x => x < 0)) + throw new ArgumentException("Negative values cannot appear in the length array."); + + for (int i = 0; i < lengths.Length; i++) + { + if (lengths[i] > ((1 << (i + 1)) - 1)) + throw new ArgumentException( + string.Format("Invalid number of codes for code length {0}", (i + 1).ToString() )); + } + + return lengths; + } + + private static short[] checkValues(short[] values, short[] lengths) + { + if (values == null || values.Length > 256) + throw new ArgumentException("Values array is null or too long."); + + if (values.Any(x => x < 0)) + throw new ArgumentException("Negative values cannot appear in the values array."); + + if (values.Length != lengths.Sum(x => (int)x)) + throw new ArgumentException("Number of values does not match code length sum."); + + return values; + } + + /// + /// Retrieve the array of Huffman code lengths. If the + /// returned array is called lengthcount, there are + /// lengthcount[index] codes of length index + 1. + /// + public short[] Lengths { get { return lengths; } } + public short[] Values { get { return values; } } + + } + +} diff --git a/debian/missing-sources/plupload/csharp/Plupload/FJCore/Decoder/JpegQuantizationTable.cs b/debian/missing-sources/plupload/csharp/Plupload/FJCore/Decoder/JpegQuantizationTable.cs new file mode 100644 index 0000000..d1f073a --- /dev/null +++ b/debian/missing-sources/plupload/csharp/Plupload/FJCore/Decoder/JpegQuantizationTable.cs @@ -0,0 +1,116 @@ +/// Copyright (c) 2008 Jeffrey Powers for Fluxcapacity Open Source. +/// Under the MIT License, details: License.txt. + +using System; + +namespace FluxJpeg.Core +{ + internal class JpegQuantizationTable + { + // The table entries, stored in natural order. + private int[] table; public int[] Table { get { return table; } } + + /// + /// The standard JPEG luminance quantization table. Values are + /// stored in natural order. + /// + public static JpegQuantizationTable K1Luminance = new JpegQuantizationTable(new int[] + { + 16, 11, 10, 16, 24, 40, 51, 61, + 12, 12, 14, 19, 26, 58, 60, 55, + 14, 13, 16, 24, 40, 57, 69, 56, + 14, 17, 22, 29, 51, 87, 80, 62, + 18, 22, 37, 56, 68, 109, 103, 77, + 24, 35, 55, 64, 81, 104, 113, 92, + 49, 64, 78, 87, 103, 121, 120, 101, + 72, 92, 95, 98, 112, 100, 103, 99 + }, false); + + /// + /// The standard JPEG luminance quantization table, scaled by + /// one-half. Values are stored in natural order. 
+ /// + public static JpegQuantizationTable K1Div2Luminance = + K1Luminance.getScaledInstance(0.5f, true); + + /// + /// The standard JPEG chrominance quantization table. Values are + /// stored in natural order. + /// + public static JpegQuantizationTable K2Chrominance = new JpegQuantizationTable(new int[] + { + 17, 18, 24, 47, 99, 99, 99, 99, + 18, 21, 26, 66, 99, 99, 99, 99, + 24, 26, 56, 99, 99, 99, 99, 99, + 47, 66, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99 + }, false); + + /// + /// The standard JPEG chrominance quantization table, scaled by + /// one-half. Values are stored in natural order. + /// + public static JpegQuantizationTable K2Div2Chrominance = + K2Chrominance.getScaledInstance(0.5f, true); + + /// + /// Construct a new JPEG quantization table. A copy is created of + /// the table argument. + /// + /// The 64-element value table, stored in natural order + public JpegQuantizationTable(int[] table) + : this(checkTable(table), true) + { + } + + /// + /// Private constructor that avoids unnecessary copying and argument + /// checking. + /// + /// the 64-element value table, stored in natural order + /// true if a copy should be created of the given table + private JpegQuantizationTable(int[] table, bool copy) + { + this.table = copy ? (int[])table.Clone() : table; + } + + private static int[] checkTable(int[] table) + { + if (table == null || table.Length != 64) + throw new ArgumentException("Invalid JPEG quantization table"); + + return table; + } + + /// + /// Retrieve a copy of this JPEG quantization table with every value + /// scaled by the given scale factor, and clamped from 1 to 255 + /// + /// the factor by which to scale this table + /// clamp scaled values to a maximum of 255 if baseline or from 1 to 32767 otherwise. + /// new scaled JPEG quantization table + public JpegQuantizationTable getScaledInstance(float scaleFactor, + bool forceBaseline) + { + int[] scaledTable = (int[])table.Clone(); + int max = forceBaseline ? 255 : 32767; + + for (int i = 0; i < scaledTable.Length; i++) + { + scaledTable[i] = (int)Math.Round(scaleFactor * (float)scaledTable[i]); + if (scaledTable[i] < 1) + scaledTable[i] = 1; + else if (scaledTable[i] > max) + scaledTable[i] = max; + } + + return new JpegQuantizationTable(scaledTable, false); + } + + } + + +} diff --git a/debian/missing-sources/plupload/csharp/Plupload/FJCore/Decoder/JpegScan.cs b/debian/missing-sources/plupload/csharp/Plupload/FJCore/Decoder/JpegScan.cs new file mode 100644 index 0000000..38e1911 --- /dev/null +++ b/debian/missing-sources/plupload/csharp/Plupload/FJCore/Decoder/JpegScan.cs @@ -0,0 +1,37 @@ +/// Copyright (c) 2008 Jeffrey Powers for Fluxcapacity Open Source. +/// Under the MIT License, details: License.txt. 
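getScaledInstance above is how a quality setting eventually reaches the quantization step: every entry is multiplied by the scale factor, rounded, and clamped to 1..255 for baseline tables or 1..32767 otherwise. A small usage sketch, assuming it compiles inside the FluxJpeg.Core assembly since the table class is internal:

using FluxJpeg.Core;

// Usage sketch: with a very small scale factor the baseline clamp takes over, so every
// entry bottoms out at 1 (for instance round(0.01 * 17) is 0 and is clamped up to 1).
internal static class QuantScalingSketch
{
    internal static int[] FlattenedChrominance()
    {
        return JpegQuantizationTable.K2Chrominance.getScaledInstance(0.01f, true).Table;
    }
}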
+ +using System; +using System.Collections.Generic; +using System.Linq; + +namespace FluxJpeg.Core.Decoder +{ + internal class JpegScan + { + private List components = new List(); + public IList Components { get { return components.AsReadOnly(); } } + + private int maxV = 0, maxH = 0; + internal int MaxH { get { return maxH; } } + internal int MaxV { get { return maxV; } } + + public void AddComponent(byte id, byte factorHorizontal, byte factorVertical, + byte quantizationID, byte colorMode) + { + JpegComponent component = new JpegComponent( this, + id, factorHorizontal, factorVertical, quantizationID, colorMode); + + components.Add(component); + + // Defined in Annex A + maxH = components.Max(x => x.factorH); + maxV = components.Max(x => x.factorV); + } + + public JpegComponent GetComponentById(byte Id) + { + return components.First(x => x.component_id == Id); + } + } +} diff --git a/debian/missing-sources/plupload/csharp/Plupload/FJCore/Encoder/JpegEncoder.cs b/debian/missing-sources/plupload/csharp/Plupload/FJCore/Encoder/JpegEncoder.cs new file mode 100644 index 0000000..3ff9a93 --- /dev/null +++ b/debian/missing-sources/plupload/csharp/Plupload/FJCore/Encoder/JpegEncoder.cs @@ -0,0 +1,327 @@ +/// Copyright (c) 2008 Jeffrey Powers for Fluxcapacity Open Source. +/// Under the MIT License, details: License.txt. +// +// Partially derives from a Java encoder, JpegEncoder.java by James R Weeks. +// Implements Baseline JPEG Encoding http://www.opennet.ru/docs/formats/jpeg.txt + +using System; +using System.Collections.Generic; +using System.IO; + +namespace FluxJpeg.Core.Encoder +{ + public class JpegEncodeProgressChangedArgs : EventArgs + { + public double EncodeProgress; // 0.0 to 1.0 + } + + public class JpegEncoder + { + JpegEncodeProgressChangedArgs _progress; + + DecodedJpeg _input; + Stream _outStream; + HuffmanTable _huf; + DCT _dct; + + int _height; + int _width; + int _quality; + + private const int Ss = 0; + private const int Se = 63; + private const int Ah = 0; + private const int Al = 0; + + private static readonly int[] CompID = { 1, 2, 3 }; + private static readonly int[] HsampFactor = { 1, 1, 1 }; + private static readonly int[] VsampFactor = { 1, 1, 1 }; + private static readonly int[] QtableNumber = { 0, 1, 1 }; + private static readonly int[] DCtableNumber = { 0, 1, 1 }; + private static readonly int[] ACtableNumber = { 0, 1, 1 }; + + public event EventHandler EncodeProgressChanged; + + public JpegEncoder(Image image, int quality, Stream outStream) + : this(new DecodedJpeg(image), quality, outStream) { /* see overload */ } + + /// + /// Encodes a JPEG, preserving the colorspace and metadata of the input JPEG. + /// + /// Decoded Jpeg to start with. + /// Quality of the image from 0 to 100. (Compression from max to min.) + /// Stream where the result will be placed. 
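Both constructors feed the same pipeline, so a typical call site stays small. A hypothetical usage sketch; decodedImage stands for any FluxJpeg.Core.Image obtained elsewhere (for instance from the decoder), and the file path is a placeholder:

using System.IO;
using FluxJpeg.Core;
using FluxJpeg.Core.Encoder;

// Hypothetical usage sketch: quality runs 0..100 as documented above.
internal static class JpegEncoderUsageSketch
{
    internal static void SaveAsJpeg(Image decodedImage, string path)
    {
        using (FileStream output = File.Create(path))
        {
            JpegEncoder encoder = new JpegEncoder(decodedImage, 90, output);
            encoder.Encode(); // writes headers, compresses the scan, appends the EOI marker
        }
    }
}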
+ public JpegEncoder(DecodedJpeg decodedJpeg, int quality, Stream outStream) + { + _input = decodedJpeg; + + /* This encoder requires YCbCr */ + _input.Image.ChangeColorSpace(ColorSpace.YCbCr); + + _quality = quality; + + _height = _input.Image.Height; + _width = _input.Image.Width; + _outStream = outStream; + _dct = new DCT(_quality); + _huf = new HuffmanTable(null); + } + + public void Encode() + { + _progress = new JpegEncodeProgressChangedArgs(); + + WriteHeaders(); + CompressTo(_outStream); + WriteMarker(new byte[] { 0xFF, 0xD9 }); // End of Image + + _progress.EncodeProgress = 1.0; + if (EncodeProgressChanged != null) + EncodeProgressChanged(this, _progress); + + _outStream.Flush(); + } + + internal void WriteHeaders() + { + int i, j, index, offset; + int[] tempArray; + + // Start of Image + byte[] SOI = { (byte)0xFF, (byte)0xD8 }; + WriteMarker(SOI); + + if (!_input.HasJFIF) // Supplement JFIF if missing + { + byte[] JFIF = new byte[18] + { + (byte)0xff, (byte)0xe0, + (byte)0x00, (byte)0x10, + (byte)0x4a, (byte)0x46, + (byte)0x49, (byte)0x46, + (byte)0x00, (byte)0x01, + (byte)0x00, (byte)0x00, + (byte)0x00, (byte)0x01, + (byte)0x00, (byte)0x01, + (byte)0x00, (byte)0x00 + }; + + WriteArray(JFIF); + } + + IO.BinaryWriter writer = new IO.BinaryWriter(_outStream); + + /* APP headers and COM headers follow the same format + * which has a 16-bit integer length followed by a block + * of binary data. */ + foreach (JpegHeader header in _input.MetaHeaders) + { + writer.Write(JPEGMarker.XFF); + writer.Write(header.Marker); + + // Header's length + writer.Write((short)(header.Data.Length + 2)); + writer.Write(header.Data); + } + + // The DQT header + // 0 is the luminance index and 1 is the chrominance index + byte[] DQT = new byte[134]; + DQT[0] = JPEGMarker.XFF; + DQT[1] = JPEGMarker.DQT; + DQT[2] = (byte)0x00; + DQT[3] = (byte)0x84; + offset = 4; + for (i = 0; i < 2; i++) + { + DQT[offset++] = (byte)((0 << 4) + i); + tempArray = (int[])_dct.quantum[i]; + + for (j = 0; j < 64; j++) + { + DQT[offset++] = (byte)tempArray[ ZigZag.ZigZagMap[j] ]; + } + } + + WriteArray(DQT); + + // Start of Frame Header ( Baseline JPEG ) + byte[] SOF = new byte[19]; + SOF[0] = JPEGMarker.XFF; + SOF[1] = JPEGMarker.SOF0; + SOF[2] = (byte)0x00; + SOF[3] = (byte)17; + SOF[4] = (byte)_input.Precision; + SOF[5] = (byte)((_input.Image.Height >> 8) & 0xFF); + SOF[6] = (byte)((_input.Image.Height) & 0xFF); + SOF[7] = (byte)((_input.Image.Width >> 8) & 0xFF); + SOF[8] = (byte)((_input.Image.Width) & 0xFF); + SOF[9] = (byte)_input.Image.ComponentCount; + index = 10; + + for (i = 0; i < SOF[9]; i++) + { + SOF[index++] = (byte)JpegEncoder.CompID[i]; + SOF[index++] = (byte)((_input.HsampFactor[i] << 4) + _input.VsampFactor[i]); + SOF[index++] = (byte)JpegEncoder.QtableNumber[i]; + } + + WriteArray(SOF); + + // The DHT Header + byte[] DHT1, DHT2, DHT3, DHT4; + int bytes, temp, oldindex, intermediateindex; + index = 4; + oldindex = 4; + DHT1 = new byte[17]; + DHT4 = new byte[4]; + DHT4[0] = JPEGMarker.XFF; + DHT4[1] = JPEGMarker.DHT; + for (i = 0; i < 4; i++) + { + bytes = 0; + + // top 4 bits: table class (0=DC, 1=AC) + // bottom 4: index (0=luminance, 1=chrominance) + byte huffmanInfo = (i == 0) ? (byte)0x00 : + (i == 1) ? (byte)0x10 : + (i == 2) ? 
(byte)0x01 : (byte)0x11; + + DHT1[index++ - oldindex] = huffmanInfo; + + for (j = 0; j < 16; j++) + { + temp = _huf.bitsList[i][j]; + DHT1[index++ - oldindex] = (byte)temp; + bytes += temp; + } + + intermediateindex = index; + DHT2 = new byte[bytes]; + for (j = 0; j < bytes; j++) + { + DHT2[index++ - intermediateindex] = (byte)_huf.val[i][j]; + } + DHT3 = new byte[index]; + Array.Copy(DHT4, 0, DHT3, 0, oldindex); + Array.Copy(DHT1, 0, DHT3, oldindex, 17); + Array.Copy(DHT2, 0, DHT3, oldindex + 17, bytes); + DHT4 = DHT3; + oldindex = index; + } + DHT4[2] = (byte)(((index - 2) >> 8) & 0xFF); + DHT4[3] = (byte)((index - 2) & 0xFF); + WriteArray(DHT4); + + // Start of Scan Header + byte[] SOS = new byte[14]; + SOS[0] = JPEGMarker.XFF; + SOS[1] = JPEGMarker.SOS; + SOS[2] = (byte)0x00; + SOS[3] = (byte)12; + SOS[4] = (byte)_input.Image.ComponentCount; + + index = 5; + + for (i = 0; i < SOS[4]; i++) + { + SOS[index++] = (byte)JpegEncoder.CompID[i]; + SOS[index++] = (byte)((JpegEncoder.DCtableNumber[i] << 4) + JpegEncoder.ACtableNumber[i]); + } + + SOS[index++] = (byte)JpegEncoder.Ss; + SOS[index++] = (byte)JpegEncoder.Se; + SOS[index++] = (byte)((JpegEncoder.Ah << 4) + JpegEncoder.Al); + WriteArray(SOS); + + } + + + internal void CompressTo(Stream outStream) + { + int i = 0, j = 0, r = 0, c = 0, a = 0, b = 0; + int comp, xpos, ypos, xblockoffset, yblockoffset; + byte[,] inputArray = null; + float[,] dctArray1 = new float[8, 8]; + float[,] dctArray2 = new float[8, 8]; + int[] dctArray3 = new int[8 * 8]; + + int[] lastDCvalue = new int[_input.Image.ComponentCount]; + + int Width = 0, Height = 0; + int MinBlockWidth, MinBlockHeight; + + // This initial setting of MinBlockWidth and MinBlockHeight is done to + // ensure they start with values larger than will actually be the case. + MinBlockWidth = ((_width % 8 != 0) ? (int)(Math.Floor((double)_width / 8.0) + 1) * 8 : _width); + MinBlockHeight = ((_height % 8 != 0) ? (int)(Math.Floor((double)_height / 8.0) + 1) * 8 : _height); + for (comp = 0; comp < _input.Image.ComponentCount; comp++) + { + MinBlockWidth = Math.Min(MinBlockWidth, _input.BlockWidth[comp]); + MinBlockHeight = Math.Min(MinBlockHeight, _input.BlockHeight[comp]); + } + xpos = 0; + + for (r = 0; r < MinBlockHeight; r++) + { + // Keep track of progress + _progress.EncodeProgress = (double)r / MinBlockHeight; + if (EncodeProgressChanged != null) EncodeProgressChanged(this, _progress); + + for (c = 0; c < MinBlockWidth; c++) + { + xpos = c * 8; + ypos = r * 8; + for (comp = 0; comp < _input.Image.ComponentCount; comp++) + { + Width = _input.BlockWidth[comp]; + Height = _input.BlockHeight[comp]; + + inputArray = _input.Image.Raster[comp]; + + for (i = 0; i < _input.VsampFactor[comp]; i++) + { + for (j = 0; j < _input.HsampFactor[comp]; j++) + { + xblockoffset = j * 8; + yblockoffset = i * 8; + for (a = 0; a < 8; a++) + { + // set Y value. 
check bounds + int y = ypos + yblockoffset + a; if (y >= _height) break; + + for (b = 0; b < 8; b++) + { + int x = xpos + xblockoffset + b; if (x >= _width) break; + dctArray1[a, b] = inputArray[x,y]; + } + } + dctArray2 = _dct.FastFDCT(dctArray1); + dctArray3 = _dct.QuantizeBlock(dctArray2, JpegEncoder.QtableNumber[comp]); + + _huf.HuffmanBlockEncoder(outStream, dctArray3, lastDCvalue[comp], JpegEncoder.DCtableNumber[comp], JpegEncoder.ACtableNumber[comp]); + lastDCvalue[comp] = dctArray3[0]; + } + } + } + } + } + + _huf.FlushBuffer(outStream); + } + + + void WriteMarker(byte[] data) + { + _outStream.Write(data, 0, 2); + } + + void WriteArray(byte[] data) + { + int length = (((int)(data[2] & 0xFF)) << 8) + (int)(data[3] & 0xFF) + 2; + _outStream.Write(data, 0, length); + } + + } + +} \ No newline at end of file diff --git a/debian/missing-sources/plupload/csharp/Plupload/FJCore/FDCT.cs b/debian/missing-sources/plupload/csharp/Plupload/FJCore/FDCT.cs new file mode 100644 index 0000000..630ce88 --- /dev/null +++ b/debian/missing-sources/plupload/csharp/Plupload/FJCore/FDCT.cs @@ -0,0 +1,201 @@ +using System; + +namespace FluxJpeg.Core +{ + public partial class DCT + { + public const int N = 8; + + public int[][] quantum = new int[2][]; + public double[][] divisors = new double[2][]; + + // Quantitization Matrix for luminace. + public double[] DivisorsLuminance = new double[N * N]; + + // Quantitization Matrix for chrominance. + public double[] DivisorsChrominance = new double[N * N]; + + public DCT(int quality) : this() + { + Initialize(quality); + } + + private void Initialize(int quality) + { + double[] aanScaleFactor = + { + 1.0, 1.387039845, 1.306562965, 1.175875602, + 1.0, 0.785694958, 0.541196100, 0.275899379 + }; + + int i, j, index, Quality; + + // jpeg_quality_scaling + if (quality <= 0) Quality = 1; + if (quality > 100) Quality = 100; + if (quality < 50) Quality = 5000 / quality; + else Quality = 200 - quality * 2; + + int[] scaledLum = JpegQuantizationTable.K1Luminance + .getScaledInstance(Quality / 100f, true).Table; + + index = 0; + for (i = 0; i < 8; i++) + { + for (j = 0; j < 8; j++) + { + DivisorsLuminance[index] = + (double)1.0 / + ((double)scaledLum[index] * aanScaleFactor[i] * aanScaleFactor[j] * 8.0); + + index++; + } + } + + // Creating the chrominance matrix + int[] scaledChrom = JpegQuantizationTable.K2Chrominance + .getScaledInstance(Quality / 100f, true).Table; + + index = 0; + for (i = 0; i < 8; i++) + { + for (j = 0; j < 8; j++) + { + DivisorsChrominance[index] = (double)((double)1.0 / ((double)scaledChrom[index] * aanScaleFactor[i] * aanScaleFactor[j] * (double)8.0)); + index++; + } + } + + quantum[0] = scaledLum; + divisors[0] = DivisorsLuminance; + quantum[1] = scaledChrom; + divisors[1] = DivisorsChrominance; + } + + internal float[,] FastFDCT(float[,] input) + { + float[,] output = new float[N, N]; + + float tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; + float tmp10, tmp11, tmp12, tmp13; + float z1, z2, z3, z4, z5, z11, z13; + int i, j; + + for (i = 0; i < 8; i++) + for (j = 0; j < 8; j++) + output[i, j] = input[i, j] - 128f; + + // Pass 1: process rows. 
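The quality handling at the top of Initialize follows the IJG jpeg_quality_scaling convention: a user quality of 1..100 maps to a percentage, and percentage / 100 becomes the factor handed to getScaledInstance. A standalone sketch of that mapping, with the clamp applied before the branches (Initialize stores the clamped value in Quality but the later branches still read the raw quality argument):

// Standalone sketch of the IJG quality scaling used above; illustrative only.
internal static class QualityScalingSketch
{
    internal static float ToScaleFactor(int quality)
    {
        if (quality <= 0) quality = 1;
        if (quality > 100) quality = 100;

        int percent = (quality < 50) ? 5000 / quality : 200 - quality * 2;
        return percent / 100f; // 1.0 at quality 50, 0.0 at quality 100, 50.0 at quality 1
    }
}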
+ + for (i = 0; i < 8; i++) + { + tmp0 = output[i, 0] + output[i, 7]; + tmp7 = output[i, 0] - output[i, 7]; + tmp1 = output[i, 1] + output[i, 6]; + tmp6 = output[i, 1] - output[i, 6]; + tmp2 = output[i, 2] + output[i, 5]; + tmp5 = output[i, 2] - output[i, 5]; + tmp3 = output[i, 3] + output[i, 4]; + tmp4 = output[i, 3] - output[i, 4]; + + // Even part + tmp10 = tmp0 + tmp3; + tmp13 = tmp0 - tmp3; + tmp11 = tmp1 + tmp2; + tmp12 = tmp1 - tmp2; + + output[i, 0] = tmp10 + tmp11; + output[i, 4] = tmp10 - tmp11; + + z1 = (tmp12 + tmp13) * (float)0.707106781; + output[i, 2] = tmp13 + z1; + output[i, 6] = tmp13 - z1; + + // Odd part + tmp10 = tmp4 + tmp5; + tmp11 = tmp5 + tmp6; + tmp12 = tmp6 + tmp7; + + // The rotator is modified from fig 4-8 to avoid extra negations. + z5 = (tmp10 - tmp12) * (float)0.382683433; + z2 = ((float)0.541196100) * tmp10 + z5; + z4 = ((float)1.306562965) * tmp12 + z5; + z3 = tmp11 * ((float)0.707106781); + + z11 = tmp7 + z3; + z13 = tmp7 - z3; + + output[i, 5] = z13 + z2; + output[i, 3] = z13 - z2; + output[i, 1] = z11 + z4; + output[i, 7] = z11 - z4; + } + + // Pass 2: process columns + + for (i = 0; i < 8; i++) + { + tmp0 = output[0, i] + output[7, i]; + tmp7 = output[0, i] - output[7, i]; + tmp1 = output[1, i] + output[6, i]; + tmp6 = output[1, i] - output[6, i]; + tmp2 = output[2, i] + output[5, i]; + tmp5 = output[2, i] - output[5, i]; + tmp3 = output[3, i] + output[4, i]; + tmp4 = output[3, i] - output[4, i]; + + // Even part + tmp10 = tmp0 + tmp3; + tmp13 = tmp0 - tmp3; + tmp11 = tmp1 + tmp2; + tmp12 = tmp1 - tmp2; + + output[0, i] = tmp10 + tmp11; + output[4, i] = tmp10 - tmp11; + + z1 = (tmp12 + tmp13) * (float)0.707106781; + output[2, i] = tmp13 + z1; + output[6, i] = tmp13 - z1; + + // Odd part + tmp10 = tmp4 + tmp5; + tmp11 = tmp5 + tmp6; + tmp12 = tmp6 + tmp7; + + // The rotator is modified from fig 4-8 to avoid extra negations. + z5 = (tmp10 - tmp12) * (float)0.382683433; + z2 = ((float)0.541196100) * tmp10 + z5; + z4 = ((float)1.306562965) * tmp12 + z5; + z3 = tmp11 * ((float)0.707106781); + + z11 = tmp7 + z3; + z13 = tmp7 - z3; + + output[5, i] = z13 + z2; + output[3, i] = z13 - z2; + output[1, i] = z11 + z4; + output[7, i] = z11 - z4; + } + + return output; + } + + + internal int[] QuantizeBlock(float[,] inputData, int code) + { + int[] result = new int[N * N]; + int index = 0; + + for (int i = 0; i < N; i++) + for (int j = 0; j < N; j++) + { + result[index] = (int)(Math.Round(inputData[i, j] * divisors[code][index])); + index++; + } + + return result; + } + + + } +} diff --git a/debian/missing-sources/plupload/csharp/Plupload/FJCore/Filter/Convolution.cs b/debian/missing-sources/plupload/csharp/Plupload/FJCore/Filter/Convolution.cs new file mode 100644 index 0000000..9099e8c --- /dev/null +++ b/debian/missing-sources/plupload/csharp/Plupload/FJCore/Filter/Convolution.cs @@ -0,0 +1,404 @@ +/// Copyright (c) 2009 Jeffrey Powers for Occipital Open Source. +/// Under the MIT License, details: License.txt. 
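FastFDCT above is the AAN (Arai, Agui, Nakajima) factorization; its raw outputs carry fixed per-coefficient scale factors that Initialize folds into the divisors, so they are not directly comparable to a textbook DCT, and the two only line up after QuantizeBlock. For reference, a straightforward (and much slower) version of the transform it approximates is sketched below; it is illustrative and not part of FJCore:

using System;

// Reference 8x8 forward DCT (unscaled), for comparison only. The input is assumed to be
// already level-shifted by -128, which FastFDCT does internally before its first pass.
internal static class ReferenceFdctSketch
{
    internal static double[,] Transform(float[,] block)
    {
        const int N = 8;
        double[,] output = new double[N, N];

        for (int u = 0; u < N; u++)
            for (int v = 0; v < N; v++)
            {
                double sum = 0.0;
                for (int x = 0; x < N; x++)
                    for (int y = 0; y < N; y++)
                        sum += block[x, y]
                             * Math.Cos((2 * x + 1) * u * Math.PI / 16.0)
                             * Math.Cos((2 * y + 1) * v * Math.PI / 16.0);

                double cu = (u == 0) ? 1.0 / Math.Sqrt(2.0) : 1.0;
                double cv = (v == 0) ? 1.0 / Math.Sqrt(2.0) : 1.0;
                output[u, v] = 0.25 * cu * cv * sum;
            }
        return output;
    }
}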
+ +using System; +using System.Threading; + +using FluxJpeg.Core; + +namespace FluxJpeg.Core.Filtering +{ + public class Convolution + { + + public static readonly Convolution Instance = new Convolution(); + + public GrayImage GaussianConv(GrayImage data, double std) + { + float[] filter = GaussianFilter(std); + return Conv2DSeparable(data, filter); + } + + public float[] GaussianFilter(double std) + { + const double Precision = 0.01f; + + double var = std * std; + + double n = Math.Sqrt(-1 * var * Math.Log(Precision)); + int half = (int)Math.Ceiling(n); + + float[] filter = new float[half]; + + double sum = -1.0; + for (int i = 0; i < half; i++) + { + double val = Math.Exp(-0.5 * (i * i) / var); + filter[i] = (float)val; + sum += 2 * val; + } + + /* Normalize */ + for (int i = 0; i < half; i++) + filter[i] /= (float)sum; + + return filter; + + } + + public GrayImage Conv2DSeparable(GrayImage data, float[] filter) + { + GrayImage pass1 = Filter1DSymmetric(data, filter, true); + GrayImage result = Filter1DSymmetric(pass1, filter, true); + + return result; + } + + private struct FilterJob + { + public float[] filter; + public int start; + public int end; + public GrayImage data; + public GrayImage result; + public int dataPtr; + public int destPtr; + } + + + /// + /// Filters an GrayImage with a 1D symmetric filter along the X-axis. + /// (This operation is multithreaded) + /// + /// GrayImage to be operated on + /// Filter to use (center tap plus right-hand-side) + /// Transpose the result? + /// Transposed, filtered GrayImage. + public GrayImage Filter1DSymmetric(GrayImage data, float[] filter, bool transpose) + { + GrayImage result = transpose ? + new GrayImage(data.Height, data.Width) : + new GrayImage(data.Width, data.Height); + + int startY = 0; + + int destPtr = transpose ? startY : (startY * result.Width); + + FilterJob job + = new FilterJob + { + filter = filter, + data = data, + destPtr = destPtr, + result = result, + start = startY, + end = data.Height / 2 + }; + + ParameterizedThreadStart del = transpose ? + new ParameterizedThreadStart(FilterPartSymmetricT) : + new ParameterizedThreadStart(FilterPartSymmetric); + + Thread worker = new Thread(del); + worker.Start(job); + + startY = data.Height / 2; + destPtr = transpose ? startY : (startY * result.Width); + + + job.start = startY; + job.destPtr = destPtr; + job.end = data.Height; + + del((object)job); // Run the appropriate filter in this thread, too + + worker.Join(); + + + return result; + } + + + /// + /// Convolves part of an GrayImage with a 1D filter along the X-axis + /// and transposes it as well. 
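GaussianFilter above keeps only the centre tap and the right-hand taps, and its running sum starts at -1.0 so that the centre tap, exp(0) = 1, is counted once instead of twice when the filter is normalized. Mirroring the half-kernel makes it easy to see that the full kernel sums to one; a small illustrative helper, not from FJCore:

// Illustrative check: mirror the half-kernel returned by GaussianFilter into a full
// symmetric kernel; its entries sum to 1 (up to rounding) because the half-kernel was
// normalized with the centre tap counted once.
internal static class KernelSketch
{
    internal static float[] ExpandHalfKernel(float[] half)
    {
        float[] kernel = new float[2 * half.Length - 1];
        int centre = half.Length - 1;

        for (int i = 0; i < half.Length; i++)
        {
            kernel[centre + i] = half[i];
            kernel[centre - i] = half[i];
        }
        return kernel;
    }
}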
+ /// + /// Filter operation details + private void FilterPartSymmetricT(object filterJob) + { + FilterJob fj = (FilterJob)filterJob; + + + GrayImage data = fj.data; + float[] srcData = data.Scan0; + float[] filter = fj.filter; + GrayImage result = fj.result; + + int pad = filter.Length - 1; + + #region Filter and transpose + for (int y = fj.start; y < fj.end; y++) + { + int rowStart = y * data.Width; + + int ptr = rowStart; + + // Left checked region + for (int x = 0; x < pad; x++) + { + float pixel = srcData[ptr] * filter[0]; + + // Part of the filter that fits within the GrayImage + for (int i = 1; i < x + 1; i++) + pixel += (srcData[ptr + i] + srcData[ptr - i]) * filter[i]; + + // Part of the filter that falls off the left side + for (int i = x + 1; i < filter.Length; i++) + pixel += (srcData[ptr + i] + srcData[ptr + i]) * filter[i]; + + result[y, x] = pixel; ptr++; + } + + // Unchecked region + for (int x = pad; x < data.Width - pad; x++) + { + float pixel = srcData[ptr] * filter[0]; + + for (int i = 1; i < filter.Length; i++) + pixel += (srcData[ptr + i] + srcData[ptr - i]) * filter[i]; + + result[y, x] = pixel; ptr++; + } + + // Right checked region + for (int x = data.Width - pad; x < data.Width; x++) + { + float pixel = srcData[ptr] * filter[0]; + + // Part of the filter that fits within the GrayImage + for (int i = 1; i < (data.Width - x); i++) + pixel += (srcData[ptr + i] + srcData[ptr - i]) * filter[i]; + + // Part of the filter that falls off the right side + for (int i = (data.Width - x); i < filter.Length; i++) + pixel += (srcData[ptr - i] + srcData[ptr - i]) * filter[i]; + + result[y, x] = pixel; ptr++; + } + } + #endregion + + } + + /// + /// Convolves an GrayImage with a 1D filter along the X-axis. + /// + /// Filter operation details + private void FilterPartSymmetric(object filterJob) + { + FilterJob fj = (FilterJob)filterJob; + + GrayImage data = fj.data; + float[] srcData = data.Scan0; + float[] filter = fj.filter; + GrayImage result = fj.result; + float[] resData = result.Scan0; + + int pad = filter.Length - 1; + + int destPtr = fj.destPtr; + + #region Filter (no transpose) + for (int y = fj.start; y < fj.end; y++) + { + int rowStart = y * data.Width; + + int ptr = fj.dataPtr + rowStart; + + // Left checked region + for (int x = 0; x < pad; x++) + { + float pixel = srcData[ptr] * filter[0]; + + // Part of the filter that fits within the GrayImage + for (int i = 1; i < x + 1; i++) + pixel += (srcData[ptr + i] + srcData[ptr - i]) * filter[i]; + + // Part of the filter that falls off the left side + for (int i = x + 1; i < filter.Length; i++) + pixel += (srcData[ptr + i] + srcData[ptr + i]) * filter[i]; + + resData[destPtr++] = pixel; ptr++; + } + + // Unchecked region + for (int x = pad; x < data.Width - pad; x++) + { + float pixel = srcData[ptr] * filter[0]; + + for (int i = 1; i < filter.Length; i++) + pixel += (srcData[ptr + i] + srcData[ptr - i]) * filter[i]; + + resData[destPtr++] = pixel; ptr++; + } + + // Right checked region + for (int x = data.Width - pad; x < data.Width; x++) + { + float pixel = srcData[ptr] * filter[0]; + + // Part of the filter that fits within the GrayImage + for (int i = 0; i < (data.Width - x); i++) + pixel += (srcData[ptr + i] + srcData[ptr - i]) * filter[i]; + + // Part of the filter that falls off the right side + for (int i = (data.Width - x); i < filter.Length; i++) + pixel += (srcData[ptr + i] + srcData[ptr - i]) * filter[i]; + + resData[destPtr++] = pixel; ptr++; + } + } + + #endregion + + } + + + + public GrayImage 
Conv2DSymmetric(GrayImage data, GrayImage opLR) + { + int xPad = opLR.Width - 1; + int yPad = opLR.Height - 1; + + GrayImage padded = new GrayImage(data.Width + 2 * xPad, data.Height + 2 * yPad); + + int dataIdx = 0; + for (int y = 0; y < data.Height; y++) + { + int rowStart = (y + yPad) * (data.Width + 2 * xPad) + xPad; + for (int x = 0; x < data.Width; x++) + { + padded.Scan0[rowStart + x] = data.Scan0[dataIdx]; + dataIdx++; + } + } + + return Conv2DSymm(padded, opLR); + } + + + /// + /// Convolves an GrayImage with a 2D-symmetric operator. + /// + /// Data to be convolved with the operator + /// Lower-right quadrant of the operator. + /// + private GrayImage Conv2DSymm(GrayImage data, GrayImage opLR) + { + if (opLR.Width % 2 != 0 || opLR.Height % 2 != 0) + throw new ArgumentException("Operator must have an even number of rows and columns."); + + int xPad = opLR.Width - 1; + int yPad = opLR.Height - 1; + + GrayImage result = new GrayImage(data.Width - 2 * xPad, data.Height - 2 * yPad); + + for (int y = yPad; y < data.Height - yPad; y++) + { + for (int x = xPad; x < data.Width - xPad; x++) + { + // Center pixel + float pixel = data[x, y] * opLR.Scan0[0]; + + // Vertical center + for (int op_y = 1; op_y < opLR.Height; op_y++) + pixel += (data[x, y + op_y] + data[x, y - op_y]) * opLR[0, op_y]; + + //Horizontal center + for (int op_x = 1; op_x < opLR.Width; op_x++) + pixel += (data[x + op_x, y] + data[x - op_x, y]) * opLR[op_x, 0]; + + //Quadrants + int opIdx = 1; + + for (int op_y = 1; op_y < opLR.Height; op_y++) + { + int baseIdx1 = ((y + op_y) * data.Width) + x; + int baseIdx2 = ((y - op_y) * data.Width) + x; + + // Loop unrolling can save 25% execution time here + + for (int op_x = 1; op_x < opLR.Width; op_x++) + { + pixel += (data.Scan0[baseIdx1 + op_x] + + data.Scan0[baseIdx2 + op_x] + + data.Scan0[baseIdx1 - op_x] + + data.Scan0[baseIdx2 - op_x]) * opLR.Scan0[opIdx]; + + opIdx++; + } + + opIdx++; // Skip 0th col on next row + } + + result[x - xPad, y - yPad] = pixel; + + } // loop over data x + + } // loop over data y + + return result; + } + + /// + /// Vanilla 2D convolution. Not optimized. + /// + /// + /// + /// + public GrayImage Conv2D(GrayImage data, GrayImage op) + { + GrayImage result = new GrayImage(data.Width, data.Height); + + if (op.Width % 2 == 0 || op.Height % 2 == 0) + throw new ArgumentException("Operator must have an odd number of rows and columns."); + + int x_offset = op.Width / 2; + int y_offset = op.Height / 2; + + for (int y = 0; y < data.Height; y++) + { + for (int x = 0; x < data.Width; x++) + { + float pixel = 0; + float wt = 0; + + for (int op_y = 0; op_y < op.Height; op_y++) + { + int d_y = y - y_offset + op_y; + if (d_y < 0 || d_y >= data.Height) continue; + + for (int op_x = 0; op_x < op.Width; op_x++) + { + int d_x = x - x_offset + op_x; + if (d_x < 0 || d_x >= data.Width) continue; + + float op_val = op[op_x, op_y]; + + /* Perform actual convolution */ + wt += Math.Abs(op_val); + pixel += data[d_x, d_y] * op_val; + } + } + + result[x, y] = pixel / wt; + + } // loop over data x + + } // loop over data y + + return result; + } + + } +} diff --git a/debian/missing-sources/plupload/csharp/Plupload/FJCore/Filter/FilterBase.cs b/debian/missing-sources/plupload/csharp/Plupload/FJCore/Filter/FilterBase.cs new file mode 100644 index 0000000..a3f4b3a --- /dev/null +++ b/debian/missing-sources/plupload/csharp/Plupload/FJCore/Filter/FilterBase.cs @@ -0,0 +1,47 @@ +/// Copyright (c) 2008-09 Jeffrey Powers for Occipital Open Source. 
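Convolution and GrayImage are public, so the Gaussian blur that the lowpass resize filter relies on can also be driven directly against a single channel. A hypothetical usage sketch; channel stands for one byte[width, height] plane such as an entry of Image.Raster:

using FluxJpeg.Core.Filtering;

// Hypothetical usage: blur one 8-bit plane with the separable Gaussian defined above.
internal static class BlurSketch
{
    internal static byte[,] BlurChannel(byte[,] channel, double sigma)
    {
        GrayImage gray = new GrayImage(channel);                        // bytes scaled to 0..1 floats
        GrayImage blurred = Convolution.Instance.GaussianConv(gray, sigma);
        return blurred.ToByteArray2D();                                 // back to 0..255 bytes
    }
}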
+/// Under the MIT License, details: License.txt. + +namespace FluxJpeg.Core.Filtering +{ + using System; + + public enum ResamplingFilters + { + NearestNeighbor, + LowpassAntiAlias + //Bicubic + } + + public class FilterProgressEventArgs : EventArgs { public double Progress; } + + internal abstract class Filter + { + protected int _newWidth, _newHeight; + protected byte[][,] _sourceData, _destinationData; + protected bool _color; + + public event EventHandler ProgressChanged; + FilterProgressEventArgs progressArgs = new FilterProgressEventArgs(); + + protected void UpdateProgress(double progress) + { + progressArgs.Progress = progress; + if (ProgressChanged != null) ProgressChanged(this, progressArgs); + } + + public byte[][,] Apply( byte[][,] imageData, int newWidth, int newHeight ) + { + _newHeight = newHeight; + _newWidth = newWidth; + _color = !(imageData.Length == 1); + _destinationData = Image.CreateRaster(newWidth, newHeight, imageData.Length); + _sourceData = imageData; + + ApplyFilter(); + + return _destinationData; + } + + protected abstract void ApplyFilter(); + } +} diff --git a/debian/missing-sources/plupload/csharp/Plupload/FJCore/Filter/FilterLowpassResize.cs b/debian/missing-sources/plupload/csharp/Plupload/FJCore/Filter/FilterLowpassResize.cs new file mode 100644 index 0000000..e45004e --- /dev/null +++ b/debian/missing-sources/plupload/csharp/Plupload/FJCore/Filter/FilterLowpassResize.cs @@ -0,0 +1,44 @@ +/// Copyright (c) 2009 Jeffrey Powers for Occipital Open Source. +/// Under the MIT License, details: License.txt. + +namespace FluxJpeg.Core.Filtering +{ + using System; + + internal class LowpassResize : Filter + { + protected override void ApplyFilter() + { + // get source image size + int width = _sourceData[0].GetLength(0), + height = _sourceData[0].GetLength(1); + + int channels = _sourceData.Length; + + // Estimate a good filter size for the gaussian. + // Note that gaussian isn't an ideal bandpass filter + // so this is an experimentally determined quantity + double std = (width / _newWidth) * 0.50; + + for(int i = 0; i < channels; i++) + { + GrayImage channel = new GrayImage(_sourceData[i]); + + channel = Convolution.Instance.GaussianConv(channel, std); + + _sourceData[i] = channel.ToByteArray2D(); + } + + // number of pixels to shift in the original image + double xStep = (double)width / _newWidth, + yStep = (double)height / _newHeight; + + + NNResize resizer = new NNResize(); + + _destinationData = resizer.Apply(_sourceData, _newWidth, _newHeight); + + + } + } +} diff --git a/debian/missing-sources/plupload/csharp/Plupload/FJCore/Filter/FilterNNResize.cs b/debian/missing-sources/plupload/csharp/Plupload/FJCore/Filter/FilterNNResize.cs new file mode 100644 index 0000000..44e3ddb --- /dev/null +++ b/debian/missing-sources/plupload/csharp/Plupload/FJCore/Filter/FilterNNResize.cs @@ -0,0 +1,47 @@ +/// Copyright (c) 2008 Jeffrey Powers for Fluxcapacity Open Source. +/// Under the MIT License, details: License.txt. 
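Filter.Apply above is the common entry point for the resize filters: it stores the source raster, allocates the destination, and defers to the subclass's ApplyFilter. A hypothetical driver, assuming it compiles inside the FluxJpeg.Core assembly since the filter types are internal, and intended for downscaling (LowpassResize derives its blur radius from the width ratio):

using FluxJpeg.Core.Filtering;

// Hypothetical driver for the filters above; 'raster' has the same shape as Image.Raster,
// one byte[width, height] plane per channel.
internal static class ResizeSketch
{
    internal static byte[][,] Downscale(byte[][,] raster, int newWidth, int newHeight)
    {
        Filter filter = new LowpassResize();   // blurs each channel, then nearest-neighbour samples
        return filter.Apply(raster, newWidth, newHeight);
    }
}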
+ +namespace FluxJpeg.Core.Filtering +{ + using System; + + internal class NNResize : Filter + { + protected override void ApplyFilter() + { + // get source image size + int width = _sourceData[0].GetLength(0), + height = _sourceData[0].GetLength(1); + + // number of pixels to shift in the original image + double xStep = (double)width / _newWidth, + yStep = (double)height / _newHeight; + + double sX = 0.5*xStep, sY = 0.5*yStep; + int i_sY, i_sX; + + for (int y = 0; y < _newHeight; y++) + { + i_sY = (int)sY; sX = 0; + + UpdateProgress((double)y / _newHeight); + + for (int x = 0; x < _newWidth; x++) + { + i_sX = (int)sX; + + _destinationData[0][x, y] = _sourceData[0][i_sX, i_sY]; + + if (_color) { + + _destinationData[1][x, y] = _sourceData[1][i_sX, i_sY]; + _destinationData[2][x, y] = _sourceData[2][i_sX, i_sY]; + } + + sX += xStep; + } + sY += yStep; + } + } + } +} diff --git a/debian/missing-sources/plupload/csharp/Plupload/FJCore/Filter/GrayImage.cs b/debian/missing-sources/plupload/csharp/Plupload/FJCore/Filter/GrayImage.cs new file mode 100644 index 0000000..0ea84e0 --- /dev/null +++ b/debian/missing-sources/plupload/csharp/Plupload/FJCore/Filter/GrayImage.cs @@ -0,0 +1,77 @@ +using System; +using System.Collections.Generic; +using System.Text; +using System.IO; +using System.Runtime.InteropServices; + +namespace FluxJpeg.Core.Filtering +{ + public class GrayImage + { + public float[] Scan0; + private int _width; + private int _height; + + public int Width { get { return _width; } } + public int Height { get { return _height; } } + + /// + /// Returns a new 0.0-initialized image of specified size. + /// + /// Width in pixels + /// Height in pixels + public GrayImage(int width, int height) + { + _width = width; _height = height; + Scan0 = new float[width * height]; + } + + /// + /// Creates a 0.0 to 1.0 grayscale image from a bitmap. + /// + public GrayImage(byte[,] channel) + { + Convert(channel); + } + + /// + /// Access a pixel within the image. + /// + /// X-coordinate + /// Y-coordinate + /// Pixel brightness between 0.0 and 1.0 + public float this[int x, int y] + { + get { return Scan0[y * _width + x]; } + set { Scan0[y * _width + x] = value; } + } + + private void Convert(byte[,] channel) + { + _width = channel.GetLength(0); + _height = channel.GetLength(1); + + Scan0 = new float[_width* _height]; + + int i = 0; + + for (int y = 0; y < _height; y++) + for (int x = 0; x < _width; x++) + Scan0[i++] = channel[x, y] / 255f; + } + + public byte[,] ToByteArray2D() + { + byte[,] result = new byte[_width, _height]; + + int i = 0; + for (int y = 0; y < _height; y++) + for (int x = 0; x < _width; x++) + result[x, y] = (byte)(Scan0[i++] * 255f); + + return result; + } + + + } +} diff --git a/debian/missing-sources/plupload/csharp/Plupload/FJCore/IJG.txt b/debian/missing-sources/plupload/csharp/Plupload/FJCore/IJG.txt new file mode 100644 index 0000000..08a1ed8 --- /dev/null +++ b/debian/missing-sources/plupload/csharp/Plupload/FJCore/IJG.txt @@ -0,0 +1,90 @@ +This software is based in part on the work of the Independent JPEG Group: + +The Independent JPEG Group's JPEG software +========================================== + +LEGAL ISSUES +============ + +In plain English: + +1. We don't promise that this software works. (But if you find any bugs, + please let us know!) +2. You can use this software for whatever you want. You don't have to pay us. +3. You may not pretend that you wrote this software. 
If you use it in a + program, you must acknowledge somewhere in your documentation that + you've used the IJG code. + +In legalese: + +The authors make NO WARRANTY or representation, either express or implied, +with respect to this software, its quality, accuracy, merchantability, or +fitness for a particular purpose. This software is provided "AS IS", and you, +its user, assume the entire risk as to its quality and accuracy. + +This software is copyright (C) 1991-1998, Thomas G. Lane. +All Rights Reserved except as specified below. + +Permission is hereby granted to use, copy, modify, and distribute this +software (or portions thereof) for any purpose, without fee, subject to these +conditions: +(1) If any part of the source code for this software is distributed, then this +README file must be included, with this copyright and no-warranty notice +unaltered; and any additions, deletions, or changes to the original files +must be clearly indicated in accompanying documentation. +(2) If only executable code is distributed, then the accompanying +documentation must state that "this software is based in part on the work of +the Independent JPEG Group". +(3) Permission for use of this software is granted only if the user accepts +full responsibility for any undesirable consequences; the authors accept +NO LIABILITY for damages of any kind. + +These conditions apply to any software derived from or based on the IJG code, +not just to the unmodified library. If you use our work, you ought to +acknowledge us. + +Permission is NOT granted for the use of any IJG author's name or company name +in advertising or publicity relating to this software or products derived from +it. This software may be referred to only as "the Independent JPEG Group's +software". + +We specifically permit and encourage the use of this software as the basis of +commercial products, provided that all warranty or liability claims are +assumed by the product vendor. + + +ansi2knr.c is included in this distribution by permission of L. Peter Deutsch, +sole proprietor of its copyright holder, Aladdin Enterprises of Menlo Park, CA. +ansi2knr.c is NOT covered by the above copyright and conditions, but instead +by the usual distribution terms of the Free Software Foundation; principally, +that you must include source code if you redistribute it. (See the file +ansi2knr.c for full details.) However, since ansi2knr.c is not needed as part +of any program generated from the IJG code, this does not limit you more than +the foregoing paragraphs do. + +The Unix configuration script "configure" was produced with GNU Autoconf. +It is copyright by the Free Software Foundation but is freely distributable. +The same holds for its supporting scripts (config.guess, config.sub, +ltconfig, ltmain.sh). Another support script, install-sh, is copyright +by M.I.T. but is also freely distributable. + +It appears that the arithmetic coding option of the JPEG spec is covered by +patents owned by IBM, AT&T, and Mitsubishi. Hence arithmetic coding cannot +legally be used without obtaining one or more licenses. For this reason, +support for arithmetic coding has been removed from the free JPEG software. +(Since arithmetic coding provides only a marginal gain over the unpatented +Huffman mode, it is unlikely that very many implementations will support it.) +So far as we are aware, there are no patent restrictions on the remaining +code. + +The IJG distribution formerly included code to read and write GIF files. 
+To avoid entanglement with the Unisys LZW patent, GIF reading support has +been removed altogether, and the GIF writer has been simplified to produce +"uncompressed GIFs". This technique does not use the LZW algorithm; the +resulting GIF files are larger than usual, but are readable by all standard +GIF decoders. + +We are required to state that + "The Graphics Interchange Format(c) is the Copyright property of + CompuServe Incorporated. GIF(sm) is a Service Mark property of + CompuServe Incorporated." diff --git a/debian/missing-sources/plupload/csharp/Plupload/FJCore/IO/BinaryReader.cs b/debian/missing-sources/plupload/csharp/Plupload/FJCore/IO/BinaryReader.cs new file mode 100644 index 0000000..70a0ee6 --- /dev/null +++ b/debian/missing-sources/plupload/csharp/Plupload/FJCore/IO/BinaryReader.cs @@ -0,0 +1,46 @@ +/// Copyright (c) 2008 Jeffrey Powers for Fluxcapacity Open Source. +/// Under the MIT License, details: License.txt. + +using System; +using System.IO; + +namespace FluxJpeg.Core.IO +{ + /// + /// Big-endian binary reader + /// + internal class BinaryReader + { + Stream _stream; + byte[] _buffer; + + public Stream BaseStream { get { return _stream; } } + + public BinaryReader(byte[] data) : this(new MemoryStream(data)) { } + + public BinaryReader(Stream stream) + { + _stream = stream; + _buffer = new byte[2]; + } + + public byte ReadByte() + { + int b = _stream.ReadByte(); + if (b == -1) throw new EndOfStreamException(); + return (byte)b; + } + + public ushort ReadShort() + { + _stream.Read(_buffer, 0, 2); + return (ushort)((_buffer[0] << 8) | (_buffer[1] & 0xff)); + } + + public int Read(byte[] buffer, int offset, int count) + { + return _stream.Read(buffer, offset, count); + } + + } +} diff --git a/debian/missing-sources/plupload/csharp/Plupload/FJCore/IO/BinaryWriter.cs b/debian/missing-sources/plupload/csharp/Plupload/FJCore/IO/BinaryWriter.cs new file mode 100644 index 0000000..b9dc969 --- /dev/null +++ b/debian/missing-sources/plupload/csharp/Plupload/FJCore/IO/BinaryWriter.cs @@ -0,0 +1,45 @@ +/// Copyright (c) 2008 Jeffrey Powers for Fluxcapacity Open Source. +/// Under the MIT License, details: License.txt. + +using System; +using System.Text; +using System.IO; + +namespace FluxJpeg.Core.IO +{ + /// + /// A Big-endian binary writer. + /// + internal class BinaryWriter + { + private Stream _stream; + + internal BinaryWriter(Stream stream) + { + _stream = stream; + } + + internal void Write(byte[] val) + { + _stream.Write(val, 0, val.Length); + } + + internal void Write(byte[] val, int offset, int count) + { + _stream.Write(val, offset, count); + } + + + internal void Write(short val) + { + _stream.WriteByte((byte)(( val >> 8 ) & 0xFF)); + _stream.WriteByte((byte)(val & 0xFF)); + } + + internal void Write(byte val) + { + _stream.WriteByte(val); + } + + } +} diff --git a/debian/missing-sources/plupload/csharp/Plupload/FJCore/IO/JpegBinaryReader.cs b/debian/missing-sources/plupload/csharp/Plupload/FJCore/IO/JpegBinaryReader.cs new file mode 100644 index 0000000..759b248 --- /dev/null +++ b/debian/missing-sources/plupload/csharp/Plupload/FJCore/IO/JpegBinaryReader.cs @@ -0,0 +1,117 @@ +/// Copyright (c) 2008 Jeffrey Powers for Fluxcapacity Open Source. +/// Under the MIT License, details: License.txt. 
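Both helpers above fix the byte order to big-endian, which is what JPEG marker segments use regardless of platform, and Write(short) and ReadShort are inverses at the byte level. A round-trip sketch; the class name is illustrative, and it assumes it compiles inside the FluxJpeg.Core assembly since both types are internal:

using System.IO;

// Round-trip sketch for the big-endian pair above: the high byte is written first, so
// 0x1234 is stored as 0x12 0x34 and ReadShort reassembles the same 16-bit pattern.
internal static class EndianSketch
{
    internal static ushort RoundTrip(short value)
    {
        MemoryStream buffer = new MemoryStream();
        new FluxJpeg.Core.IO.BinaryWriter(buffer).Write(value);
        buffer.Position = 0;
        return new FluxJpeg.Core.IO.BinaryReader(buffer).ReadShort();
    }
}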
+ +using System; +using System.IO; + +namespace FluxJpeg.Core.IO +{ + internal class JPEGMarkerFoundException : Exception + { + public JPEGMarkerFoundException(byte marker) { this.Marker = marker; } + public byte Marker; + } + + internal class JPEGBinaryReader : IO.BinaryReader + { + public int eob_run = 0; + + private byte marker; + + public JPEGBinaryReader(Stream input) + : base(input) + { + } + + /// + /// Seeks through the stream until a marker is found. + /// + public byte GetNextMarker() + { + try { while (true) { ReadJpegByte(); } } + catch (JPEGMarkerFoundException ex) { + return ex.Marker; + } + } + + byte _bitBuffer; + + protected int _bitsLeft = 0; + + public int BitOffset + { + get { return (8 - _bitsLeft) % 8; } + set + { + if (_bitsLeft != 0) BaseStream.Seek(-1, SeekOrigin.Current); + _bitsLeft = (8 - value) % 8; + } + } + + /// + /// Places n bits from the stream, where the most-significant bits + /// from the first byte read end up as the most-significant of the returned + /// n bits. + /// + /// Number of bits to return + /// Integer containing the bits desired -- shifted all the way right. + public int ReadBits(int n) + { + int result = 0; + + #region Special case -- included for optimization purposes + if (_bitsLeft >= n) + { + _bitsLeft-=n; + result = _bitBuffer >> (8 - n); + _bitBuffer = (byte)(_bitBuffer << n); + return result; + } + #endregion + + while (n > 0) + { + if (_bitsLeft == 0) + { + _bitBuffer = ReadJpegByte(); + _bitsLeft = 8; + } + + int take = n <= _bitsLeft ? n : _bitsLeft; + + result = result | ((_bitBuffer >> 8 - take) << (n - take)); + + _bitBuffer = (byte)(_bitBuffer << take); + + _bitsLeft -= take; + n -= take; + } + + return result; + } + + protected byte ReadJpegByte() + { + byte c = ReadByte(); + + /* If it's 0xFF, check and discard stuffed zero byte */ + if (c == JPEGMarker.XFF) + { + // Discard padded oxFFs + while ((c = ReadByte()) == 0xff) ; + + // ff00 is the escaped form of 0xff + if (c == 0) c = 0xff; + else + { + // Otherwise we've found a new marker. + marker = c; + throw new JPEGMarkerFoundException(marker); + } + } + + return c; + } + + } +} diff --git a/debian/missing-sources/plupload/csharp/Plupload/FJCore/Image.cs b/debian/missing-sources/plupload/csharp/Plupload/FJCore/Image.cs new file mode 100644 index 0000000..d1db8cd --- /dev/null +++ b/debian/missing-sources/plupload/csharp/Plupload/FJCore/Image.cs @@ -0,0 +1,183 @@ +/// Copyright (c) 2008 Jeffrey Powers for Fluxcapacity Open Source. +/// Under the MIT License, details: License.txt. + +using System; +#if SILVERLIGHT +#else +using System.Drawing; +using System.Drawing.Imaging; +#endif + +namespace FluxJpeg.Core { + public struct ColorModel { + public ColorSpace colorspace; + public bool Opaque; + } + + public enum ColorSpace { Gray, YCbCr, RGB } + + public class Image { + private ColorModel _cm; + private byte[][,] _raster; + + public byte[][,] Raster { get { return _raster; } } + public ColorModel ColorModel { get { return _cm; } } + + /// X density (dots per inch). + public double DensityX { get; set; } + /// Y density (dots per inch). 
+ public double DensityY { get; set; } + + public int ComponentCount { get { return _raster.Length; } } + + /// + /// Converts the colorspace of an image (in-place) + /// + /// Colorspace to convert into + /// Self + public Image ChangeColorSpace(ColorSpace cs) { + // Colorspace is already correct + if (_cm.colorspace == cs) return this; + + byte[] ycbcr = new byte[3]; + byte[] rgb = new byte[3]; + + if (_cm.colorspace == ColorSpace.RGB && cs == ColorSpace.YCbCr) { + /* + * Y' = + 0.299 * R'd + 0.587 * G'd + 0.114 * B'd + Cb = 128 - 0.168736 * R'd - 0.331264 * G'd + 0.5 * B'd + Cr = 128 + 0.5 * R'd - 0.418688 * G'd - 0.081312 * B'd + * + */ + + for (int x = 0; x < width; x++) + for (int y = 0; y < height; y++) { + YCbCr.fromRGB(ref _raster[0][x, y], ref _raster[1][x, y], ref _raster[2][x, y]); + } + + _cm.colorspace = ColorSpace.YCbCr; + + + } else if (_cm.colorspace == ColorSpace.YCbCr && cs == ColorSpace.RGB) { + + for (int x = 0; x < width; x++) + for (int y = 0; y < height; y++) { + // 0 is LUMA + // 1 is BLUE + // 2 is RED + + YCbCr.toRGB(ref _raster[0][x, y], ref _raster[1][x, y], ref _raster[2][x, y]); + } + + _cm.colorspace = ColorSpace.RGB; + } else if (_cm.colorspace == ColorSpace.Gray && cs == ColorSpace.YCbCr) { + // To convert to YCbCr, we just add two 128-filled chroma channels + + byte[,] Cb = new byte[width, height]; + byte[,] Cr = new byte[width, height]; + + for (int x = 0; x < width; x++) + for (int y = 0; y < height; y++) { + Cb[x, y] = 128; Cr[x, y] = 128; + } + + _raster = new byte[][,] { _raster[0], Cb, Cr }; + + _cm.colorspace = ColorSpace.YCbCr; + } else if (_cm.colorspace == ColorSpace.Gray && cs == ColorSpace.RGB) { + ChangeColorSpace(ColorSpace.YCbCr); + ChangeColorSpace(ColorSpace.RGB); + } else { + throw new Exception("Colorspace conversion not supported."); + } + + return this; + } + + private int width; private int height; + + public int Width { get { return width; } } + public int Height { get { return height; } } + + public Image(ColorModel cm, byte[][,] raster) { + width = raster[0].GetLength(0); + height = raster[0].GetLength(1); + + _cm = cm; + _raster = raster; + } + + public static byte[][,] CreateRaster(int width, int height, int bands) { + // Create the raster + byte[][,] raster = new byte[bands][,]; + for (int b = 0; b < bands; b++) + raster[b] = new byte[width, height]; + return raster; + } + + delegate void ConvertColor(ref byte c1, ref byte c2, ref byte c3); + +#if SILVERLIGHT +#else + public Bitmap ToBitmap() + { + ConvertColor ColorConverter; + + switch(_cm.colorspace) + { + case ColorSpace.YCbCr: + ColorConverter = YCbCr.toRGB; + break; + default: + throw new Exception("Colorspace not supported yet."); + } + + int _width = width; + int _height = height; + + Bitmap bitmap = new Bitmap(_width, _height, PixelFormat.Format32bppArgb); + + BitmapData bmData = bitmap.LockBits(new Rectangle(0, 0, bitmap.Width, bitmap.Height), + System.Drawing.Imaging.ImageLockMode.WriteOnly, + System.Drawing.Imaging.PixelFormat.Format32bppArgb); + + byte[] outColor = new byte[3]; + byte[] inColor = new byte[3]; + + unsafe + { + int i = 0; + + byte* ptrBitmap = (byte*)bmData.Scan0; + + for (int y = 0; y < _height; y++) + { + for (int x = 0; x < _width; x++) + { + ptrBitmap[0] = (byte)_raster[0][x, y]; + ptrBitmap[1] = (byte)_raster[1][x, y]; + ptrBitmap[2] = (byte)_raster[2][x, y]; + + ColorConverter(ref ptrBitmap[0], ref ptrBitmap[1], ref ptrBitmap[2]); + + // Swap RGB --> BGR + byte R = ptrBitmap[0]; + ptrBitmap[0] = ptrBitmap[2]; + ptrBitmap[2] = R; + + 
ptrBitmap[3] = 255; /* 100% opacity */ + ptrBitmap += 4; // advance to the next pixel + i++; // " + } + } + } + + bitmap.UnlockBits(bmData); + + return bitmap; + + } +#endif + + } +} diff --git a/debian/missing-sources/plupload/csharp/Plupload/FJCore/JAI.txt b/debian/missing-sources/plupload/csharp/Plupload/FJCore/JAI.txt new file mode 100644 index 0000000..999e4de --- /dev/null +++ b/debian/missing-sources/plupload/csharp/Plupload/FJCore/JAI.txt @@ -0,0 +1,40 @@ +This software is based in part on the Java Advanced Imaging IO by Sun Microsystems. + +That software is governed by the following license: +=================================================== + +Copyright (c) 2005 Sun Microsystems, Inc. All Rights Reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +- Redistribution of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +- Redistribution in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + +Neither the name of Sun Microsystems, Inc. or the names of +contributors may be used to endorse or promote products derived +from this software without specific prior written permission. + +This software is provided "AS IS," without a warranty of any +kind. ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND +WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT, ARE HEREBY +EXCLUDED. SUN MIDROSYSTEMS, INC. ("SUN") AND ITS LICENSORS SHALL +NOT BE LIABLE FOR ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF +USING, MODIFYING OR DISTRIBUTING THIS SOFTWARE OR ITS +DERIVATIVES. IN NO EVENT WILL SUN OR ITS LICENSORS BE LIABLE FOR +ANY LOST REVENUE, PROFIT OR DATA, OR FOR DIRECT, INDIRECT, SPECIAL, +CONSEQUENTIAL, INCIDENTAL OR PUNITIVE DAMAGES, HOWEVER CAUSED AND +REGARDLESS OF THE THEORY OF LIABILITY, ARISING OUT OF THE USE OF OR +INABILITY TO USE THIS SOFTWARE, EVEN IF SUN HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + +You acknowledge that this software is not designed or intended for +use in the design, construction, operation or maintenance of any +nuclear facility. \ No newline at end of file diff --git a/debian/missing-sources/plupload/csharp/Plupload/FJCore/JpegMarker.cs b/debian/missing-sources/plupload/csharp/Plupload/FJCore/JpegMarker.cs new file mode 100644 index 0000000..2d18b0a --- /dev/null +++ b/debian/missing-sources/plupload/csharp/Plupload/FJCore/JpegMarker.cs @@ -0,0 +1,128 @@ +/// Copyright (c) 2008 Jeffrey Powers for Fluxcapacity Open Source. +/// Under the MIT License, details: License.txt. + +using System; + +namespace FluxJpeg.Core +{ + internal sealed class JPEGMarker + { + // JFIF identifiers + public const byte JFIF_J = (byte)0x4a; + public const byte JFIF_F = (byte)0x46; + public const byte JFIF_I = (byte)0x49; + public const byte JFIF_X = (byte)0x46; + + // JFIF extension codes + public const byte JFXX_JPEG = (byte)0x10; + public const byte JFXX_ONE_BPP = (byte)0x11; + public const byte JFXX_THREE_BPP = (byte)0x13; + + // Marker prefix. Next byte is a marker, unless ... + public const byte XFF = (byte)0xff; + // ... marker byte encoding an xff. 
+ public const byte X00 = (byte)0x00; + + #region Section Markers + + /// Huffman Table + public const byte DHT = (byte)0xc4; + + /// Quantization Table + public const byte DQT = (byte)0xdb; + + /// Start of Scan + public const byte SOS = (byte)0xda; + + /// Define Restart Interval + public const byte DRI = (byte)0xdd; + + /// Comment + public const byte COM = (byte)0xfe; + + /// Start of Image + public const byte SOI = (byte)0xd8; + + /// End of Image + public const byte EOI = (byte)0xd9; + + /// Define Number of Lines + public const byte DNL = (byte)0xdc; + + #endregion + + #region Application Reserved Keywords + + public const byte APP0 = (byte)0xe0; + public const byte APP1 = (byte)0xe1; + public const byte APP2 = (byte)0xe2; + public const byte APP3 = (byte)0xe3; + public const byte APP4 = (byte)0xe4; + public const byte APP5 = (byte)0xe5; + public const byte APP6 = (byte)0xe6; + public const byte APP7 = (byte)0xe7; + public const byte APP8 = (byte)0xe8; + public const byte APP9 = (byte)0xe9; + public const byte APP10 = (byte)0xea; + public const byte APP11 = (byte)0xeb; + public const byte APP12 = (byte)0xec; + public const byte APP13 = (byte)0xed; + public const byte APP14 = (byte)0xee; + public const byte APP15 = (byte)0xef; + + #endregion + + public const byte RST0 = (byte)0xd0; + public const byte RST1 = (byte)0xd1; + public const byte RST2 = (byte)0xd2; + public const byte RST3 = (byte)0xd3; + public const byte RST4 = (byte)0xd4; + public const byte RST5 = (byte)0xd5; + public const byte RST6 = (byte)0xd6; + public const byte RST7 = (byte)0xd7; + + #region Start of Frame (SOF) + + /// Nondifferential Huffman-coding frame (baseline dct) + public const byte SOF0 = (byte)0xc0; + + /// Nondifferential Huffman-coding frame (extended dct) + public const byte SOF1 = (byte)0xc1; + + /// Nondifferential Huffman-coding frame (progressive dct) + public const byte SOF2 = (byte)0xc2; + + /// Nondifferential Huffman-coding frame Lossless (Sequential) + public const byte SOF3 = (byte)0xc3; + + /// Differential Huffman-coding frame Sequential DCT + public const byte SOF5 = (byte)0xc5; + + /// Differential Huffman-coding frame Progressive DCT + public const byte SOF6 = (byte)0xc6; + + /// Differential Huffman-coding frame lossless + public const byte SOF7 = (byte)0xc7; + + /// Nondifferential Arithmetic-coding frame (extended dct) + public const byte SOF9 = (byte)0xc9; + + /// Nondifferential Arithmetic-coding frame (progressive dct) + public const byte SOF10 = (byte)0xca; + + /// Nondifferential Arithmetic-coding frame (lossless) + public const byte SOF11 = (byte)0xcb; + + /// Differential Arithmetic-coding frame (sequential dct) + public const byte SOF13 = (byte)0xcd; + + /// Differential Arithmetic-coding frame (progressive dct) + public const byte SOF14 = (byte)0xce; + + /// Differential Arithmetic-coding frame (lossless) + public const byte SOF15 = (byte)0xcf; + + #endregion + + } +} diff --git a/debian/missing-sources/plupload/csharp/Plupload/FJCore/License.txt b/debian/missing-sources/plupload/csharp/Plupload/FJCore/License.txt new file mode 100644 index 0000000..99cc648 --- /dev/null +++ b/debian/missing-sources/plupload/csharp/Plupload/FJCore/License.txt @@ -0,0 +1,24 @@ +Copyright (c) 2008-2009 Occipital Open Source + +Partial derivations: See IJG.txt and JAI.txt. 
+ +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. diff --git a/debian/missing-sources/plupload/csharp/Plupload/FJCore/README.txt b/debian/missing-sources/plupload/csharp/Plupload/FJCore/README.txt new file mode 100644 index 0000000..7069e9e --- /dev/null +++ b/debian/missing-sources/plupload/csharp/Plupload/FJCore/README.txt @@ -0,0 +1,30 @@ +FJCore: A Fluxcapacity Open Source Project +-------------------------------------------------------------------------------------------- + +Thanks for checking out FJCore, an image library including a pure C# implementation +of the JPEG baseline and progressive codecs. + +Design goals: + - No external dependencies (besides a C# compiler and ECMA-standard CIL runtime) + - High performance + - High image quality + - Simple, intuitive usage pattern + +With your help, we can make this one of the most readable and efficient libraries +available to run on any CIL runtime (including the .NET framework, Mono, and Silverlight)! + +Try FJCore online: fluxtools.net/emailphotos + +More information: fluxcapacity.net/open-source + +-------------------------------------------------------------------------------------------- + +April 7, 2008: + +- Initial release. + +July 13, 2008: + +- Encoder is now included. +- Silverlight project added (both FJCore and FJCoreWin share source). +- FJExample, a Silverlight test application, is included. \ No newline at end of file diff --git a/debian/missing-sources/plupload/csharp/Plupload/FJCore/Resize/ImageResizer.cs b/debian/missing-sources/plupload/csharp/Plupload/FJCore/Resize/ImageResizer.cs new file mode 100644 index 0000000..3a896b5 --- /dev/null +++ b/debian/missing-sources/plupload/csharp/Plupload/FJCore/Resize/ImageResizer.cs @@ -0,0 +1,98 @@ +/// Copyright (c) 2008 Jeffrey Powers for Fluxcapacity Open Source. +/// Under the MIT License, details: License.txt. + +using System; +using System.Collections.Generic; +using System.Text; +using FluxJpeg.Core.Filtering; + +namespace FluxJpeg.Core +{ + public class ResizeNotNeededException : Exception { } + public class ResizeProgressChangedEventArgs : EventArgs { public double Progress; } + + public class ImageResizer + { + private ResizeProgressChangedEventArgs progress = new ResizeProgressChangedEventArgs(); + public event EventHandler ProgressChanged; + + private Image _input; + + public ImageResizer(Image input) + { + _input = input; + } + + public static bool ResizeNeeded(FluxJpeg.Core.Image image, int maxEdgeLength) + { + double scale = (image.Width > image.Height) ? 
+ (double)maxEdgeLength / image.Width : + (double)maxEdgeLength / image.Height; + + return scale < 1.0; // true if we must downscale + } + + public Image Resize(int maxEdgeLength, ResamplingFilters technique) + { + double scale = 0; + + if (_input.Width > _input.Height) + scale = (double)maxEdgeLength / _input.Width; + else + scale = (double)maxEdgeLength / _input.Height; + + if (scale >= 1.0) + throw new ResizeNotNeededException(); + else + return Resize(scale, technique); + } + + public Image Resize(int maxWidth, int maxHeight, ResamplingFilters technique) + { + double wFrac = (double)maxWidth / _input.Width; + double hFrac = (double)maxHeight / _input.Height; + double scale = 0; + + // Make the image as large as possible, while + // fitting in the supplied box and + // obeying the aspect ratio + + if (wFrac < hFrac) { scale = wFrac; } + else { scale = hFrac; } + + if (scale >= 1.0) + throw new ResizeNotNeededException(); + else + return Resize(scale, technique); + } + + public Image Resize(double scale, ResamplingFilters technique) + { + int height = (int)(scale * _input.Height); + int width = (int)(scale * _input.Width); + + Filter resizeFilter; + + switch (technique) + { + case ResamplingFilters.NearestNeighbor: + resizeFilter = new NNResize(); + break; + case ResamplingFilters.LowpassAntiAlias: + resizeFilter = new LowpassResize(); + break; + default: + throw new NotSupportedException(); + } + + return new Image(_input.ColorModel, resizeFilter.Apply(_input.Raster, width, height)); + } + + void ResizeProgressChanged(object sender, Filtering.FilterProgressEventArgs e) + { + progress.Progress = e.Progress; + if (ProgressChanged != null) ProgressChanged(this, progress); + } + + } +} diff --git a/debian/missing-sources/plupload/csharp/Plupload/FJCore/YCbCr.cs b/debian/missing-sources/plupload/csharp/Plupload/FJCore/YCbCr.cs new file mode 100644 index 0000000..3128b36 --- /dev/null +++ b/debian/missing-sources/plupload/csharp/Plupload/FJCore/YCbCr.cs @@ -0,0 +1,59 @@ +/// Copyright (c) 2008 Jeffrey Powers for Fluxcapacity Open Source. +/// Under the MIT License, details: License.txt. + +using System; + +namespace FluxJpeg.Core +{ + internal class YCbCr + { + + public static void toRGB(ref byte c1, ref byte c2, ref byte c3) + { + double dY = (double)c1; + double dCb2 = (double)c2 - 128; + double dCr2 = (double)c3 - 128; + + double dR = dY + 1.402 * dCr2; + double dG = dY - 0.34414 * dCb2 - 0.71414 * dCr2; + double dB = dY + 1.772 * dCb2; + + c1 = dR > 255 ? (byte)255 : dR < 0 ? (byte)0 : (byte)dR; + c2 = dG > 255 ? (byte)255 : dG < 0 ? (byte)0 : (byte)dG; + c3 = dB > 255 ? (byte)255 : dB < 0 ? 
(byte)0 : (byte)dB; + } + + public static void fromRGB(ref byte c1, ref byte c2, ref byte c3) + { + double dR = (double)c1; + double dG = (double)c2; + double dB = (double)c3; + + c1 = (byte)(0.299 * dR + 0.587 * dG + 0.114 * dB); + c2 = (byte)(-0.16874 * dR - 0.33126 * dG + 0.5 * dB + 128); + c3 = (byte)(0.5 * dR - 0.41869 * dG - 0.08131 * dB + 128); + } + + ///* RGB to YCbCr range 0-255 */ + //public static void fromRGB(byte[] rgb, byte[] ycbcr) + //{ + // ycbcr[0] = (byte)((0.299 * (float)rgb[0] + 0.587 * (float)rgb[1] + 0.114 * (float)rgb[2])); + // ycbcr[1] = (byte)(128 + (byte)((-0.16874 * (float)rgb[0] - 0.33126 * (float)rgb[1] + 0.5 * (float)rgb[2]))); + // ycbcr[2] = (byte)(128 + (byte)((0.5 * (float)rgb[0] - 0.41869 * (float)rgb[1] - 0.08131 * (float)rgb[2]))); + //} + + + /* RGB to YCbCr range 0-255 */ + public static float[] fromRGB(float[] data) + { + float[] dest = new float[3]; + + dest[0] = (float)((0.299 * (float)data[0] + 0.587 * (float)data[1] + 0.114 * (float)data[2])); + dest[1] = 128 + (float)((-0.16874 * (float)data[0] - 0.33126 * (float)data[1] + 0.5 * (float)data[2])); + dest[2] = 128 + (float)((0.5 * (float)data[0] - 0.41869 * (float)data[1] - 0.08131 * (float)data[2])); + + return (dest); + } + } + +} \ No newline at end of file diff --git a/debian/missing-sources/plupload/csharp/Plupload/FJCore/ZigZag.cs b/debian/missing-sources/plupload/csharp/Plupload/FJCore/ZigZag.cs new file mode 100644 index 0000000..0dd7ac0 --- /dev/null +++ b/debian/missing-sources/plupload/csharp/Plupload/FJCore/ZigZag.cs @@ -0,0 +1,65 @@ +/// Copyright (c) 2008 Jeffrey Powers for Fluxcapacity Open Source. +/// Under the MIT License, details: License.txt. + +namespace FluxJpeg.Core +{ + internal class ZigZag + { + + #region Alternate Form + internal static readonly int[] ZigZagMap = + { + 0, 1, 8, 16, 9, 2, 3, 10, + 17, 24, 32, 25, 18, 11, 4, 5, + 12, 19, 26, 33, 40, 48, 41, 34, + 27, 20, 13, 6, 7, 14, 21, 28, + 35, 42, 49, 56, 57, 50, 43, 36, + 29, 22, 15, 23, 30, 37, 44, 51, + 58, 59, 52, 45, 38, 31, 39, 46, + 53, 60, 61, 54, 47, 55, 62, 63 + }; + //public static void UnZigZag(float[] input, float[] output) + //{ + // for (int i = 0; i < 64; i++) + // output[ZigZagMap[i] / 8, ZigZagMap[i] % 8] = input[i]; + //} + #endregion + + public static void UnZigZag(float[] input, float[] output) + { + output[0] = input[0]; output[1] = input[1]; + output[8] = input[2]; output[16] = input[3]; + output[9] = input[4]; output[2] = input[5]; + output[3] = input[6]; output[10] = input[7]; + output[17] = input[8]; output[24] = input[9]; + output[32] = input[10]; output[25] = input[11]; + output[18] = input[12]; output[11] = input[13]; + output[4] = input[14]; output[5] = input[15]; + output[12] = input[16]; output[19] = input[17]; + output[26] = input[18]; output[33] = input[19]; + output[40] = input[20]; output[48] = input[21]; + output[41] = input[22]; output[34] = input[23]; + output[27] = input[24]; output[20] = input[25]; + output[13] = input[26]; output[6] = input[27]; + output[7] = input[28]; output[14] = input[29]; + output[21] = input[30]; output[28] = input[31]; + output[35] = input[32]; output[42] = input[33]; + output[49] = input[34]; output[56] = input[35]; + output[57] = input[36]; output[50] = input[37]; + output[43] = input[38]; output[36] = input[39]; + output[29] = input[40]; output[22] = input[41]; + output[15] = input[42]; output[23] = input[43]; + output[30] = input[44]; output[37] = input[45]; + output[44] = input[46]; output[51] = input[47]; + output[58] = input[48]; output[59] = 
input[49]; + output[52] = input[50]; output[45] = input[51]; + output[38] = input[52]; output[31] = input[53]; + output[39] = input[54]; output[46] = input[55]; + output[53] = input[56]; output[60] = input[57]; + output[61] = input[58]; output[54] = input[59]; + output[47] = input[60]; output[55] = input[61]; + output[62] = input[62]; output[63] = input[63]; + } + + } +} diff --git a/debian/missing-sources/plupload/csharp/Plupload/FileReference.cs b/debian/missing-sources/plupload/csharp/Plupload/FileReference.cs new file mode 100644 index 0000000..4ba86a9 --- /dev/null +++ b/debian/missing-sources/plupload/csharp/Plupload/FileReference.cs @@ -0,0 +1,721 @@ +/** + * FileReference.cs + * + * Copyright 2009, Moxiecode Systems AB + * Released under GPL License. + * + * License: http://www.plupload.com/license + * Contributing: http://www.plupload.com/contributing + */ + +using System; +using System.IO; +using System.Threading; +using System.Windows.Threading; +using System.Net; +using System.Text.RegularExpressions; +using System.Windows.Browser; +using System.Windows.Media.Imaging; +using System.Collections.Generic; +using FluxJpeg.Core.Encoder; +using FluxJpeg.Core; +using Plupload.PngEncoder; + +namespace Moxiecode.Plupload { + enum ImageType { + Jpeg, + Png + } + + /// + /// Description of File. + /// + public class FileReference { + #region private fields + private string name, uploadUrl, id, targetName, mimeType; + private FileInfo info; + private SynchronizationContext syncContext; + private int chunks, chunkSize; + private bool multipart, chunking; + private long size, chunk; + private string fileDataName; + private Dictionary multipartParams; + private Dictionary headers; + private Stream fileStream; + private Stream imageStream; + private HttpWebRequest req; + #endregion + + /// Upload complete delegate. + public delegate void UploadCompleteHandler(object sender, UploadEventArgs e); + + /// Upload chunk compleate delegate. + public delegate void UploadChunkCompleteHandler(object sender, UploadEventArgs e); + + /// Upload error delegate. + public delegate void ErrorHandler(object sender, ErrorEventArgs e); + + /// Upload progress delegate. + public delegate void ProgressHandler(object sender, ProgressEventArgs e); + + /// Upload complete event + public event UploadCompleteHandler UploadComplete; + + /// Upload chunk complete event + public event UploadChunkCompleteHandler UploadChunkComplete; + + /// Error event + public event ErrorHandler Error; + + /// Progress event + public event ProgressHandler Progress; + + /// + /// Main constructor for the file reference. + /// + /// Unique file id for item. + /// FileInfo that got returned from a file selection. + public FileReference(string id, FileInfo info) { + this.id = id; + this.name = info.Name; + this.info = info; + this.size = info.Length; + } + + /// Unique id for the file reference. + public string Id { + get { return id; } + } + + /// File name to use with upload. + public string Name { + get { return name; } + set { name = value; } + } + + /// File size for the selected file. + public long Size { + get { return this.size; } + } + + /// + /// Uploads the file to the specific url and using the specified chunk_size. + /// + /// URL to upload to. + /// Chunk size to use. + /// Image width to scale to. + /// Image height to scale to. + /// Image quality to store as. 
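+		// The summary above still lists width, height and quality as separate parameters;
+		// in this version they arrive packed inside the single json_settings argument.
+		// A sketch of the expected JSON shape, inferred from the keys read in Upload()
+		// below (all values here are illustrative only, not defaults):
+		//
+		//   {"chunk_size":1048576, "image_width":0, "image_height":0, "image_quality":90,
+		//    "file_data_name":"file", "multipart":true, "multipart_params":{}, "headers":{},
+		//    "name":"photo.jpg", "mime":"image/jpeg"}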
+ public void Upload(string upload_url, string json_settings) { + int chunkSize = 0, imageWidth = 0, imageHeight = 0, imageQuality = 90; + + Dictionary settings = (Dictionary) Moxiecode.Plupload.Utils.JsonReader.ParseJson(json_settings); + + chunkSize = Convert.ToInt32(settings["chunk_size"]); + imageWidth = Convert.ToInt32(settings["image_width"]); + imageHeight = Convert.ToInt32(settings["image_height"]); + imageQuality = Convert.ToInt32(settings["image_quality"]); + + this.fileDataName = (string)settings["file_data_name"]; + this.multipart = Convert.ToBoolean(settings["multipart"]); + this.multipartParams = (Dictionary)settings["multipart_params"]; + this.headers = (Dictionary)settings["headers"]; + this.targetName = (string) settings["name"]; + this.mimeType = (string) settings["mime"]; + + this.chunk = 0; + this.chunking = chunkSize > 0; + + this.uploadUrl = upload_url; + + try { + // Is jpeg and image size is defined + if (Regex.IsMatch(this.name, @"\.(jpeg|jpg|png)$", RegexOptions.IgnoreCase) && (imageWidth != 0 || imageHeight != 0 || imageQuality != 0)) + { + if (Regex.IsMatch(this.name, @"\.png$")) + this.imageStream = this.ResizeImage(this.info.OpenRead(), imageWidth, imageHeight, imageQuality, ImageType.Png); + else + this.imageStream = this.ResizeImage(this.info.OpenRead(), imageWidth, imageHeight, imageQuality, ImageType.Jpeg); + + this.imageStream.Seek(0, SeekOrigin.Begin); + this.size = this.imageStream.Length; + } + } catch (Exception ex) { + syncContext.Send(delegate { + this.OnIOError(new ErrorEventArgs(ex.Message, 0, this.chunks)); + }, this); + } + + if (this.chunking) { + this.chunkSize = chunkSize; + this.chunks = (int) Math.Ceiling((double) this.Size / (double) chunkSize); + } else { + this.chunkSize = (int) this.Size; + this.chunks = 1; + } + + this.UploadNextChunk(); + } + + private int ReadByteRange(byte[] buffer, long position, int offset, int count) { + int bytes = -1; + + // Read from image memory stream if it's defined + if (this.imageStream != null) { + this.imageStream.Seek(position, SeekOrigin.Begin); + return this.imageStream.Read(buffer, offset, count); + } + + // Open the file and read the specified part of it + if (this.fileStream == null) { + this.fileStream = this.info.OpenRead(); + } + + bytes = this.fileStream.Read(buffer, offset, count); + + return bytes; + } + + /// + /// Uploads the next chunk if there are more in queue. + /// + /// True/false if there are more chunks to be uploaded. + public bool UploadNextChunk() { + string url = this.uploadUrl; + + // Is there more chunks + if (this.chunk >= this.chunks) + return false; + + this.syncContext = SynchronizationContext.Current; + + // Add name, chunk and chunks to query string when we don't use multipart + if (!this.multipart) { + if (url.IndexOf('?') == -1) { + url += '?'; + } else { + url += '&'; + } + + url += "name=" + Uri.EscapeDataString(this.targetName); + + if (this.chunking) { + url += "&chunk=" + this.chunk; + url += "&chunks=" + this.chunks; + } + } + + this.req = WebRequest.Create(new Uri(HtmlPage.Document.DocumentUri, url)) as HttpWebRequest; + this.req.Method = "POST"; + + // Add custom headers + if (this.headers != null) { + foreach (string key in this.headers.Keys) { + if (this.headers[key] == null) + continue; + + switch (key.ToLower()) + { + // in silverlight 3, these are set by the web browser that hosts the Silverlight application. 
+ // http://msdn.microsoft.com/en-us/library/system.net.httpwebrequest%28v=vs.95%29.aspx + case "connection": + case "content-length": + case "expect": + case "if-modified-since": + case "referer": + case "transfer-encoding": + case "user-agent": + break; + + // in silverlight this isn't supported, can not find reference to why not + case "range": + break; + + // in .NET Framework 3.5 and below, these are set by the system. + // http://msdn.microsoft.com/en-us/library/system.net.httpwebrequest%28v=VS.90%29.aspx + case "date": + case "host": + break; + + case "accept": + this.req.Accept = (string)this.headers[key]; + break; + + case "content-type": + this.req.ContentType = (string)this.headers[key]; + break; + default: + this.req.Headers[key] = (string)this.headers[key]; + break; + } + } + } + + IAsyncResult asyncResult = this.req.BeginGetRequestStream(new AsyncCallback(RequestStreamCallback), this.req); + + return true; + } + + /// + /// Cancels uploading the current file. + /// + public void CancelUpload() { + if (this.req != null) { + this.req.Abort(); + this.req = null; + DisposeStreams(); + } + } + + #region protected methods + + protected virtual void OnUploadComplete(UploadEventArgs e) { + DisposeStreams(); + + if (UploadComplete != null) + UploadComplete(this, e); + } + + protected virtual void OnUploadChunkComplete(UploadEventArgs e) { + if (UploadChunkComplete != null) + UploadChunkComplete(this, e); + } + + protected virtual void OnIOError(ErrorEventArgs e) { + DisposeStreams(); + + if (Error != null) + Error(this, e); + } + + protected virtual void OnProgress(ProgressEventArgs e) { + if (Progress != null) + Progress(this, e); + } + + #endregion + + #region private methods + + private void RequestStreamCallback(IAsyncResult ar) { + HttpWebRequest request = (HttpWebRequest) ar.AsyncState; + string boundary = "----pluploadboundary" + DateTime.Now.Ticks, dashdash = "--", crlf = "\r\n"; + Stream requestStream = null; + byte[] buffer = new byte[1048576], strBuff; + int bytes; + long loaded = 0, end = 0; + int percent, lastPercent = 0; + + try { + requestStream = request.EndGetRequestStream(ar); + + if (this.multipart) { + request.ContentType = "multipart/form-data; boundary=" + boundary; + + // Add name to multipart array + this.multipartParams["name"] = this.targetName; + + // Add chunking when needed + if (this.chunking) { + this.multipartParams["chunk"] = this.chunk; + this.multipartParams["chunks"] = this.chunks; + } + + // Append mutlipart parameters + foreach (KeyValuePair pair in this.multipartParams) { + strBuff = this.StrToByteArray(dashdash + boundary + crlf + + "Content-Disposition: form-data; name=\"" + pair.Key + '"' + crlf + crlf + + pair.Value + crlf + ); + + requestStream.Write(strBuff, 0, strBuff.Length); + } + + // Append multipart file header + strBuff = this.StrToByteArray( + dashdash + boundary + crlf + + "Content-Disposition: form-data; name=\"" + this.fileDataName + "\"; filename=\"" + this.name + '"' + + crlf + "Content-Type: " + this.mimeType + crlf + crlf + ); + + requestStream.Write(strBuff, 0, strBuff.Length); + } else { + request.ContentType = "application/octet-stream"; + } + + // Move to start + loaded = this.chunk * this.chunkSize; + + // Find end + end = (this.chunk + 1) * this.chunkSize; + if (end > this.Size) + end = this.Size; + + while (loaded < end && (bytes = ReadByteRange(buffer, loaded, 0, (int)(end - loaded < buffer.Length ? 
end - loaded : buffer.Length))) != 0) { + loaded += bytes; + percent = (int) Math.Round((double) loaded / (double) this.Size * 100.0); + + if (percent > lastPercent) { + syncContext.Post(delegate { + if (percent > lastPercent) { + this.OnProgress(new ProgressEventArgs(loaded, this.Size)); + lastPercent = percent; + } + }, this); + } + + requestStream.Write(buffer, 0, bytes); + requestStream.Flush(); + } + + // Append multipart file footer + if (this.multipart) { + strBuff = this.StrToByteArray(crlf + dashdash + boundary + dashdash + crlf); + requestStream.Write(strBuff, 0, strBuff.Length); + } + } catch (Exception ex) { + syncContext.Send(delegate { + this.OnIOError(new ErrorEventArgs(ex.Message, this.chunk, this.chunks)); + }, this); + } finally { + try { + if (requestStream != null) { + requestStream.Close(); + requestStream.Dispose(); + requestStream = null; + } + } catch (Exception ex) { + syncContext.Send(delegate { + this.OnIOError(new ErrorEventArgs(ex.Message, this.chunk, this.chunks)); + }, this); + } + } + + try { + request.BeginGetResponse(new AsyncCallback(ResponseCallback), request); + } + catch (WebException ex) + { + if (ex.Status != WebExceptionStatus.RequestCanceled) { + syncContext.Send(delegate { + this.OnIOError(new ErrorEventArgs(ex.Message, this.chunk, this.chunks)); + }, this); + } + } + catch (Exception ex) { + syncContext.Send(delegate { + this.OnIOError(new ErrorEventArgs(ex.Message, this.chunk, this.chunks)); + }, this); + } + } + + private void ResponseCallback(IAsyncResult ar) { + try + { + HttpWebRequest request = ar.AsyncState as HttpWebRequest; + + WebResponse response = request.EndGetResponse(ar); + + syncContext.Post(ExtractResponse, response); + } + catch (WebException ex) { + if (ex.Status != WebExceptionStatus.RequestCanceled) { + syncContext.Send(delegate { + this.OnIOError(new ErrorEventArgs(ex.Message, this.chunk, this.chunks)); + }, this); + } + } + catch (Exception ex) { + syncContext.Send(delegate { + this.OnIOError(new ErrorEventArgs(ex.Message, this.chunk, this.chunks)); + }, this); + } + } + + private void ExtractResponse(object state) { + HttpWebResponse response = state as HttpWebResponse; + StreamReader respReader = null; + Stream respStream = null; + string content; + + try { + respStream = response.GetResponseStream(); + + if (response.StatusCode == HttpStatusCode.OK) { + respReader = new StreamReader(respStream); + + if (respStream != null) { + content = respReader.ReadToEnd(); + } else + throw new Exception("Error could not open response stream."); + } else + throw new Exception("Error server returned status: " + ((int) response.StatusCode) + " " + response.StatusDescription); + + this.chunk++; + + syncContext.Send(delegate { + this.OnUploadChunkComplete(new UploadEventArgs(content, this.chunk - 1, this.chunks)); + }, this); + } catch (Exception ex) { + syncContext.Send(delegate { + this.OnIOError(new ErrorEventArgs(ex.Message, chunk, chunks)); + }, this); + } finally { + if (respStream != null) + respStream.Close(); + + if (respReader != null) + respReader.Close(); + + response.Close(); + } + } + + private void DisposeStreams() { + if (fileStream != null) { + fileStream.Dispose(); + fileStream = null; + } + + if (imageStream != null) { + imageStream.Dispose(); + imageStream = null; + } + } + + private void Debug(string msg) { + ((ScriptObject) HtmlPage.Window.Eval("console")).Invoke("log", new string[] {msg}); + } + + private Stream ResizeImage(Stream image_stream, int width, int height, int quality, ImageType type) { + try { + // Load 
the image as a writeablebitmap + WriteableBitmap writableBitmap; + BitmapImage bitmapImage = new BitmapImage(); + bitmapImage.SetSource(image_stream); + writableBitmap = new WriteableBitmap(bitmapImage); + + if (width == 0) { + width = writableBitmap.PixelWidth; + } + + if (height == 0) { + height = writableBitmap.PixelHeight; + } + + double scale = Math.Min((double) width / writableBitmap.PixelWidth, (double) height / writableBitmap.PixelHeight); + + // No resize needed + if (scale >= 1.0 && (quality == 0 || type != ImageType.Jpeg)) + return image_stream; + + if (quality == 0) { + quality = 90; + } + + // Setup shorter names and pixelbuffers + int w = writableBitmap.PixelWidth; + int h = writableBitmap.PixelHeight; + int[] p = writableBitmap.Pixels; + byte[][,] imageRaster = new byte[3][,]; // RGB colors + imageRaster[0] = new byte[w, h]; + imageRaster[1] = new byte[w, h]; + imageRaster[2] = new byte[w, h]; + + // Copy WriteableBitmap data into buffer for FluxJpeg + int i = 0; + for (int y = 0; y < h; y++) { + for (int x = 0; x < w; x++) { + int color = p[i++]; + + imageRaster[0][x, y] = (byte) (color >> 16); // R + imageRaster[1][x, y] = (byte) (color >> 8); // G + imageRaster[2][x, y] = (byte) (color); // B + } + } + + // Create new FluxJpeg image based on pixel data + Image jpegImage = new Image(new ColorModel { + colorspace = ColorSpace.RGB + }, imageRaster); + + ImageResizer resizer = new ImageResizer(jpegImage); + Image resizedImage; + + if (scale < 1.0) { + // Calc new proportional size + width = (int) Math.Round(writableBitmap.PixelWidth * scale); + height = (int) Math.Round(writableBitmap.PixelHeight * scale); + + // Resize the image + resizedImage = resizer.Resize(width, height, FluxJpeg.Core.Filtering.ResamplingFilters.LowpassAntiAlias); + } else { + resizedImage = jpegImage; + } + + Stream imageStream = new MemoryStream(); + + if (type == ImageType.Jpeg) { + // Encode the resized image as Jpeg + JpegEncoder jpegEncoder = new JpegEncoder(resizedImage, quality, imageStream); + jpegEncoder.Encode(); + } else { + int[] pixelBuffer = new int[resizedImage.Height * resizedImage.Width]; + byte[][,] resizedRaster = resizedImage.Raster; + + // Convert FJCore raster to PixelBuffer + for (int y = 0; y < resizedImage.Height; y++) { + for (int x = 0; x < resizedImage.Width; x++) { + int color = 0; + + color = color | resizedRaster[0][x, y] << 16; // R + color = color | resizedRaster[1][x, y] << 8; // G + color = color | resizedRaster[2][x, y]; // B + + pixelBuffer[(y * resizedImage.Width) + x] = color; + } + } + + // Encode the resized image as Png + PngEncoder pngEncoder = new PngEncoder(pixelBuffer, resizedImage.Width, resizedImage.Height, false, PngEncoder.FILTER_NONE, Deflater.BEST_COMPRESSION); + byte[] pngBuffer = pngEncoder.pngEncode(); + imageStream.Write(pngBuffer, 0, pngBuffer.Length); + } + + return imageStream; + } catch { + // Ignore the error and let the server resize the image + } + + return image_stream; + } + + private byte[] StrToByteArray(string str) { + System.Text.UTF8Encoding encoding = new System.Text.UTF8Encoding(); + + return encoding.GetBytes(str); + } + + #endregion + } + + /// + /// Upload event arguments class. + /// + public class UploadEventArgs : EventArgs { + #region private fields + private string response; + private long chunk; + private int chunks; + #endregion + + /// + /// Main constructor for the upload event. + /// + /// Response contents as a string. 
+ public UploadEventArgs(string response) : this(response, 0, 0) { + } + + /// + /// Main constructor for the upload event. + /// + /// Response contents as a string. + /// Current chunk number. + /// Total chunks. + public UploadEventArgs(string response, long chunk, int chunks) { + this.response = response; + this.chunk = chunk; + this.chunks = chunks; + } + + /// Response from upload request. + public string Response { + get { return response; } + } + + /// Chunk number. + public long Chunk { + get { return chunk; } + } + + /// Total number of chunks. + public int Chunks { + get { return chunks; } + } + } + + /// + /// Error event arguments class. + /// + public class ErrorEventArgs : EventArgs { + #region private fields + private string message; + private long chunk; + private int chunks; + #endregion + + /// + /// Main constructor for the error event. + /// + /// Error message. + public ErrorEventArgs(string message) : this(message, 0, 0) { + this.message = message; + } + + /// + /// Main constructor for the error event. + /// + /// Error message. + /// Current chunk number. + /// Total chunks. + public ErrorEventArgs(string message, long chunk, int chunks) { + this.message = message; + this.chunk = chunk; + this.chunks = chunks; + } + + /// Chunk number. + public long Chunk { + get { return chunk; } + } + + /// Total number of chunks. + public int Chunks { + get { return chunks; } + } + + /// Error message. + public string Message { + get { return message; } + } + } + + /// + /// Progress event arguments class. + /// + public class ProgressEventArgs : EventArgs { + #region private fields + private long loaded, total; + #endregion + + /// + /// Main constructor for the progress events args. + /// + /// Number of bytes uploaded. + /// Total bytes to upload. + public ProgressEventArgs(long loaded, long total) { + this.loaded = loaded; + this.total = total; + } + + /// Total bytes to upload. + public long Total { + get { return total; } + } + + /// Number of bytes upload so far. + public long Loaded { + get { return loaded; } + } + } +} diff --git a/debian/missing-sources/plupload/csharp/Plupload/Page.xaml b/debian/missing-sources/plupload/csharp/Plupload/Page.xaml new file mode 100644 index 0000000..6236e84 --- /dev/null +++ b/debian/missing-sources/plupload/csharp/Plupload/Page.xaml @@ -0,0 +1,7 @@ + + + \ No newline at end of file diff --git a/debian/missing-sources/plupload/csharp/Plupload/Page.xaml.cs b/debian/missing-sources/plupload/csharp/Plupload/Page.xaml.cs new file mode 100644 index 0000000..66bfeef --- /dev/null +++ b/debian/missing-sources/plupload/csharp/Plupload/Page.xaml.cs @@ -0,0 +1,230 @@ +/** + * Page.xaml.cs + * + * Copyright 2009, Moxiecode Systems AB + * Released under GPL License. + * + * License: http://www.plupload.com/license + * Contributing: http://www.plupload.com/contributing + */ + +using System; +using System.Windows; +using System.Windows.Controls; +using System.Windows.Documents; +using System.Windows.Ink; +using System.Windows.Input; +using System.Windows.Media; +using System.Windows.Media.Animation; +using System.Windows.Shapes; +using System.Windows.Browser; +using System.Net; +using System.IO; +using System.Collections.Generic; +using System.Threading; +using Moxiecode.Plupload; + +namespace Moxiecode.Plupload { + /// + /// Partial page class for the Silverlight page. 
+ /// + public partial class Page : UserControl { + #region private fields + private Dictionary files; + private int idCount = 0; + private FileReference currentFile; + private string id, filter; + private bool multiselect; + private bool disabled = false; + #endregion + + /// + /// Main constructor. + /// + /// Silverlight init params. + public Page(IDictionary init_params) { + InitializeComponent(); + + HtmlPage.RegisterScriptableObject("Upload", this); + + this.files = new Dictionary(); + this.id = init_params["id"]; + this.filter = init_params["filter"]; + this.multiselect = Convert.ToBoolean(init_params["multiselect"]); + + this.FireEvent("Init"); + this.MouseLeftButtonUp += new MouseButtonEventHandler(OnClick); + + this.MouseLeftButtonDown += new MouseButtonEventHandler(OnMouseLeftButtonDown); + this.MouseEnter += new MouseEventHandler(OnMouseEnter); + this.MouseLeave += new MouseEventHandler(OnMouseLeave); + } + + private void OnClick(object sender, MouseEventArgs e) { + if (this.disabled) { + return; + } + + OpenFileDialog dlg = new OpenFileDialog(); + + this.FireEvent("StartSelectFiles"); + + try { + dlg.Multiselect = this.multiselect; + dlg.Filter = this.filter; + + if ((bool) dlg.ShowDialog()) { + foreach (FileInfo file in dlg.Files) { + FileReference uploadFile = new FileReference("u" + this.idCount++, file); + + uploadFile.UploadChunkComplete += delegate(object up_sender, UploadEventArgs args) { + FileReference evtFile = (FileReference) up_sender; + + this.FireEvent("UploadChunkSuccessful", evtFile.Id, args.Chunk, args.Chunks, args.Response); + }; + + uploadFile.UploadComplete += delegate(object up_sender, UploadEventArgs args) { + FileReference evtFile = (FileReference) up_sender; + + this.FireEvent("UploadSuccessful", evtFile.Id, args.Response); + }; + + uploadFile.Error += delegate(object up_sender, ErrorEventArgs args) { + FileReference evtFile = (FileReference) up_sender; + + this.FireEvent("UploadChunkError", evtFile.Id, args.Chunk, args.Chunks, args.Message); + }; + + uploadFile.Progress += delegate(object up_sender, ProgressEventArgs args) { + FileReference evtFile = (FileReference) up_sender; + + this.FireEvent("UploadFileProgress", evtFile.Id, args.Loaded, args.Total); + }; + + this.FireEvent("SelectFile", uploadFile.Id, uploadFile.Name, uploadFile.Size); + this.files[uploadFile.Id] = uploadFile; + } + + this.FireEvent("SelectSuccessful"); + } else + this.FireEvent("SelectCancelled"); + } catch (Exception ex) { + this.FireEvent("SelectError", ex.Message); + } + } + + + private void OnMouseLeftButtonDown(object sender, MouseEventArgs e) { + this.FireEvent("MouseLeftButtonDown"); + } + + private void OnMouseEnter(object sender, MouseEventArgs e) { + this.FireEvent("MouseEnter"); + } + + private void OnMouseLeave(object sender, MouseEventArgs e) { + this.FireEvent("MouseLeave"); + } + + /// + /// Reference to page level plupload.silverlight script object. + /// + public ScriptObject PluploadScriptObject { + get { return ((ScriptObject) HtmlPage.Window.Eval("plupload.silverlight")); } + } + + /// + /// Fires a specific event to the page level multi upload script. + /// + /// Event name to fire. + public void FireEvent(string name) { + this.PluploadScriptObject.Invoke("trigger", new string[] { this.id, name }); + } + + /// + /// Fires a specific event to the page level multi upload script. + /// + /// Event name to fire. + /// Numerous parameters to send. 
+ public void FireEvent(string name, params object[] paramlist) { + List args = new List(paramlist); + + args.Insert(0, name); + args.Insert(0, this.id); + + this.PluploadScriptObject.Invoke("trigger", args.ToArray()); + } + + [ScriptableMember] + /// + /// Uploads a specific file by id to the specific url and using a chunks. + /// + /// File id to upload. + /// Url to upload to. + /// Chunk size to use. + public void UploadFile(string id, string upload_url, string json_settings) { + if (this.files.ContainsKey(id)) { + FileReference file = this.files[id]; + + this.currentFile = file; + file.Upload(upload_url, json_settings); + } + } + + [ScriptableMember] + /// + /// Removes the specified file by id. + /// + /// File id to remove. + public void RemoveFile(string id) { + if (this.files.ContainsKey(id)) + this.files[id] = null; + } + + [ScriptableMember] + /// + /// Clears all files. + /// + public void ClearFiles() { + this.files = new Dictionary(); + } + + [ScriptableMember] + /// + /// Uploads the next chunk of the current file. Returns true/false if there is more chunks. + /// + /// true/false if there is more chunks + public bool UploadNextChunk() { + if (this.currentFile != null) + return this.currentFile.UploadNextChunk(); + + return false; + } + + [ScriptableMember] + /// + /// Cancel upload. + /// + public void CancelUpload() { + if (this.currentFile != null) + this.currentFile.CancelUpload(); + } + + [ScriptableMember] + /// + /// Disable dialog trigger. + /// + public void DisableBrowse(bool disabled = true) + { + this.disabled = disabled; + } + + /// + /// Send debug message to firebug console. + /// + /// Message to write. + private void Debug(string msg) { + ((ScriptObject) HtmlPage.Window.Eval("console")).Invoke("log", new string[] { msg }); + } + } +} diff --git a/debian/missing-sources/plupload/csharp/Plupload/Plupload.csproj b/debian/missing-sources/plupload/csharp/Plupload/Plupload.csproj new file mode 100644 index 0000000..b40f00f --- /dev/null +++ b/debian/missing-sources/plupload/csharp/Plupload/Plupload.csproj @@ -0,0 +1,222 @@ + + + + v3.5 + + + Debug + AnyCPU + Plupload + plupload.silverlight + Moxiecode.Plupload + false + 2.1.1535.0 + {95F0DEE8-DE7A-46C5-9DCC-0570B0FC4643} + Library + true + v4.0 + Default.html + true + plupload.silverlight.xap + true + Properties\AppManifest.xml + Moxiecode.Plupload.App + true + Default.html + SILVERLIGHT + true + C:\Users\spocke\AppData\Roaming\ICSharpCode/SharpDevelop3.0\Settings.SourceAnalysis + OnBuildSuccess + False + false + + + + + 3.5 + Silverlight + $(TargetFrameworkVersion) + false + publish\ + true + Disk + false + Foreground + 7 + Days + false + false + true + 0 + 1.0.0.%2a + false + true + true + + + true + full + false + bin\Debug\ + TRACE;DEBUG;SILVERLIGHT + prompt + 4 + + + PdbOnly + true + bin\ + SILVERLIGHT + prompt + 4 + false + + + False + + + False + Auto + 4194304 + AnyCPU + 4096 + + + + + + + + + + + MSBuild:Compile + Designer + MSBuild:Compile + Designer + + + App.xaml + + + Code + + + Code + + + Code + + + Code + + + Code + + + Code + + + Code + + + Code + + + Code + + + Code + + + Code + + + Code + + + Code + + + Code + + + Code + + + Code + + + Code + + + Code + + + Page.xaml + + + + + + + + + + + + + + + MSBuild:Compile + Designer + MSBuild:Compile + Designer + + + + + + + + + + False + .NET Framework 3.5 SP1 Client Profile + false + + + False + .NET Framework 3.5 SP1 + true + + + False + Microsoft Visual Basic PowerPacks 10.0 + true + + + False + Windows Installer 3.1 + true + + + + + + + + + + + + copy 
/Y plupload.silverlight.xap ..\..\..\..\js\plupload.silverlight.xap + + + + + + + + + diff --git a/debian/missing-sources/plupload/csharp/Plupload/Plupload.sln b/debian/missing-sources/plupload/csharp/Plupload/Plupload.sln new file mode 100644 index 0000000..47a792e --- /dev/null +++ b/debian/missing-sources/plupload/csharp/Plupload/Plupload.sln @@ -0,0 +1,20 @@ + +Microsoft Visual Studio Solution File, Format Version 11.00 +# Visual Studio 2010 +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Plupload", "Plupload.csproj", "{95F0DEE8-DE7A-46C5-9DCC-0570B0FC4643}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|Any CPU = Debug|Any CPU + Release|Any CPU = Release|Any CPU + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {95F0DEE8-DE7A-46C5-9DCC-0570B0FC4643}.Debug|Any CPU.ActiveCfg = Release|Any CPU + {95F0DEE8-DE7A-46C5-9DCC-0570B0FC4643}.Debug|Any CPU.Build.0 = Release|Any CPU + {95F0DEE8-DE7A-46C5-9DCC-0570B0FC4643}.Release|Any CPU.ActiveCfg = Release|Any CPU + {95F0DEE8-DE7A-46C5-9DCC-0570B0FC4643}.Release|Any CPU.Build.0 = Release|Any CPU + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection +EndGlobal diff --git a/debian/missing-sources/plupload/csharp/Plupload/PngEncoder/Adler32.cs b/debian/missing-sources/plupload/csharp/Plupload/PngEncoder/Adler32.cs new file mode 100644 index 0000000..febc82e --- /dev/null +++ b/debian/missing-sources/plupload/csharp/Plupload/PngEncoder/Adler32.cs @@ -0,0 +1,216 @@ +// Adler32.cs - Computes Adler32 data checksum of a data stream +// Copyright (C) 2001 Mike Krueger +// +// This file was translated from java, it was part of the GNU Classpath +// Copyright (C) 1999, 2000, 2001 Free Software Foundation, Inc. +// +// This program is free software; you can redistribute it and/or +// modify it under the terms of the GNU General Public License +// as published by the Free Software Foundation; either version 2 +// of the License, or (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +// +// Linking this library statically or dynamically with other modules is +// making a combined work based on this library. Thus, the terms and +// conditions of the GNU General Public License cover the whole +// combination. +// +// As a special exception, the copyright holders of this library give you +// permission to link this library with independent modules to produce an +// executable, regardless of the license terms of these independent +// modules, and to copy and distribute the resulting executable under +// terms of your choice, provided that you also meet, for each linked +// independent module, the terms and conditions of the license of that +// module. An independent module is a module which is not derived from +// or based on this library. If you modify this library, you may extend +// this exception to your version of the library, but you are not +// obligated to do so. If you do not wish to do so, delete this +// exception statement from your version. 
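+// The class comment below quotes RFC 1950: s1 starts at 1 and s2 at 0, and for each
+// input byte b, s1 = (s1 + b) % 65521 and s2 = (s2 + s1) % 65521; the checksum is
+// s2*65536 + s1. A minimal sketch of that recurrence (an illustration of the
+// algorithm only, not necessarily identical to the method bodies below; assumes a
+// byte[] named data):
+//
+//     uint s1 = 1, s2 = 0;
+//     foreach (byte b in data) {
+//         s1 = (s1 + b) % 65521;
+//         s2 = (s2 + s1) % 65521;
+//     }
+//     uint adler = (s2 << 16) | s1;   // the value exposed by Adler32.Value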
+ +using System; + +namespace Plupload.PngEncoder { + + /// + /// Computes Adler32 checksum for a stream of data. An Adler32 + /// checksum is not as reliable as a CRC32 checksum, but a lot faster to + /// compute. + /// + /// The specification for Adler32 may be found in RFC 1950. + /// ZLIB Compressed Data Format Specification version 3.3) + /// + /// + /// From that document: + /// + /// "ADLER32 (Adler-32 checksum) + /// This contains a checksum value of the uncompressed data + /// (excluding any dictionary data) computed according to Adler-32 + /// algorithm. This algorithm is a 32-bit extension and improvement + /// of the Fletcher algorithm, used in the ITU-T X.224 / ISO 8073 + /// standard. + /// + /// Adler-32 is composed of two sums accumulated per byte: s1 is + /// the sum of all bytes, s2 is the sum of all s1 values. Both sums + /// are done modulo 65521. s1 is initialized to 1, s2 to zero. The + /// Adler-32 checksum is stored as s2*65536 + s1 in most- + /// significant-byte first (network) order." + /// + /// "8.2. The Adler-32 algorithm + /// + /// The Adler-32 algorithm is much faster than the CRC32 algorithm yet + /// still provides an extremely low probability of undetected errors. + /// + /// The modulo on unsigned long accumulators can be delayed for 5552 + /// bytes, so the modulo operation time is negligible. If the bytes + /// are a, b, c, the second sum is 3a + 2b + c + 3, and so is position + /// and order sensitive, unlike the first sum, which is just a + /// checksum. That 65521 is prime is important to avoid a possible + /// large class of two-byte errors that leave the check unchanged. + /// (The Fletcher checksum uses 255, which is not prime and which also + /// makes the Fletcher check insensitive to single byte changes 0 - + /// 255.) + /// + /// The sum s1 is initialized to 1 instead of zero to make the length + /// of the sequence part of s2, so that the length does not have to be + /// checked separately. (Any sequence of zeroes has a Fletcher + /// checksum of zero.)" + /// + /// + /// + public sealed class Adler32 : IChecksum { + /// + /// largest prime smaller than 65536 + /// + const uint BASE = 65521; + + /// + /// Returns the Adler32 data checksum computed so far. + /// + public long Value { + get { + return checksum; + } + } + + /// + /// Creates a new instance of the Adler32 class. + /// The checksum starts off with a value of 1. + /// + public Adler32() { + Reset(); + } + + /// + /// Resets the Adler32 checksum to the initial value. + /// + public void Reset() { + checksum = 1; + } + + /// + /// Updates the checksum with a byte value. + /// + /// + /// The data value to add. The high byte of the int is ignored. + /// + public void Update(int value) { + // We could make a length 1 byte array and call update again, but I + // would rather not have that overhead + uint s1 = checksum & 0xFFFF; + uint s2 = checksum >> 16; + + s1 = (s1 + ((uint) value & 0xFF)) % BASE; + s2 = (s1 + s2) % BASE; + + checksum = (s2 << 16) + s1; + } + + /// + /// Updates the checksum with an array of bytes. + /// + /// + /// The source of the data to update with. + /// + public void Update(byte[] buffer) { + if (buffer == null) { + throw new ArgumentNullException("buffer"); + } + + Update(buffer, 0, buffer.Length); + } + + /// + /// Updates the checksum with the bytes taken from the array. 
+ /// + /// + /// an array of bytes + /// + /// + /// the start of the data used for this update + /// + /// + /// the number of bytes to use for this update + /// + public void Update(byte[] buffer, int offset, int count) { + if (buffer == null) { + throw new ArgumentNullException("buffer"); + } + + if (offset < 0) { + + throw new ArgumentOutOfRangeException("offset"); + + } + + if (count < 0) { + + throw new ArgumentOutOfRangeException("count"); + + } + + if (offset >= buffer.Length) { + throw new ArgumentOutOfRangeException("offset"); + + } + + if (offset + count > buffer.Length) { + throw new ArgumentOutOfRangeException("count"); + } + + //(By Per Bothner) + uint s1 = checksum & 0xFFFF; + uint s2 = checksum >> 16; + + while (count > 0) { + // We can defer the modulo operation: + // s1 maximally grows from 65521 to 65521 + 255 * 3800 + // s2 maximally grows by 3800 * median(s1) = 2090079800 < 2^31 + int n = 3800; + if (n > count) { + n = count; + } + count -= n; + while (--n >= 0) { + s1 = s1 + (uint) (buffer[offset++] & 0xff); + s2 = s2 + s1; + } + s1 %= BASE; + s2 %= BASE; + } + + checksum = (s2 << 16) | s1; + } + + #region Instance Fields + uint checksum; + #endregion + } +} diff --git a/debian/missing-sources/plupload/csharp/Plupload/PngEncoder/CRC32.cs b/debian/missing-sources/plupload/csharp/Plupload/PngEncoder/CRC32.cs new file mode 100644 index 0000000..685fc57 --- /dev/null +++ b/debian/missing-sources/plupload/csharp/Plupload/PngEncoder/CRC32.cs @@ -0,0 +1,213 @@ +// CRC32.cs - Computes CRC32 data checksum of a data stream +// Copyright (C) 2001 Mike Krueger +// +// This file was translated from java, it was part of the GNU Classpath +// Copyright (C) 1999, 2000, 2001 Free Software Foundation, Inc. +// +// This program is free software; you can redistribute it and/or +// modify it under the terms of the GNU General Public License +// as published by the Free Software Foundation; either version 2 +// of the License, or (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +// +// Linking this library statically or dynamically with other modules is +// making a combined work based on this library. Thus, the terms and +// conditions of the GNU General Public License cover the whole +// combination. +// +// As a special exception, the copyright holders of this library give you +// permission to link this library with independent modules to produce an +// executable, regardless of the license terms of these independent +// modules, and to copy and distribute the resulting executable under +// terms of your choice, provided that you also meet, for each linked +// independent module, the terms and conditions of the license of that +// module. An independent module is a module which is not derived from +// or based on this library. If you modify this library, you may extend +// this exception to your version of the library, but you are not +// obligated to do so. If you do not wish to do so, delete this +// exception statement from your version. 
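// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the Plupload sources: a minimal Adler-32
// computation matching the s1/s2 description quoted above and the
// Adler32.Update(byte[], int, int) loop.  The type name is hypothetical.
// ---------------------------------------------------------------------------
static class Adler32Sketch {
    const uint BASE = 65521;                 // largest prime below 65536

    public static uint Compute(byte[] data) {
        uint s1 = 1, s2 = 0;                 // s1 starts at 1 per RFC 1950
        foreach (byte b in data) {
            s1 = (s1 + b) % BASE;            // running sum of the bytes
            s2 = (s2 + s1) % BASE;           // running sum of the s1 values
        }
        return (s2 << 16) | s1;              // s2 in the high word, s1 in the low
    }
}
// For example, Compute(Encoding.UTF8.GetBytes("Wikipedia")) == 0x11E60398,
// the same value the Adler32 class above returns through Update()/Value.
// The production loop only reduces modulo BASE every 3800 bytes; that is an
// optimisation and does not change the result.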
+ +using System; + +namespace Plupload.PngEncoder { + + /// + /// Generate a table for a byte-wise 32-bit CRC calculation on the polynomial: + /// x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x+1. + /// + /// Polynomials over GF(2) are represented in binary, one bit per coefficient, + /// with the lowest powers in the most significant bit. Then adding polynomials + /// is just exclusive-or, and multiplying a polynomial by x is a right shift by + /// one. If we call the above polynomial p, and represent a byte as the + /// polynomial q, also with the lowest power in the most significant bit (so the + /// byte 0xb1 is the polynomial x^7+x^3+x+1), then the CRC is (q*x^32) mod p, + /// where a mod b means the remainder after dividing a by b. + /// + /// This calculation is done using the shift-register method of multiplying and + /// taking the remainder. The register is initialized to zero, and for each + /// incoming bit, x^32 is added mod p to the register if the bit is a one (where + /// x^32 mod p is p+x^32 = x^26+...+1), and the register is multiplied mod p by + /// x (which is shifting right by one and adding x^32 mod p if the bit shifted + /// out is a one). We start with the highest power (least significant bit) of + /// q and repeat for all eight bits of q. + /// + /// The table is simply the CRC of all possible eight bit values. This is all + /// the information needed to generate CRC's on data a byte at a time for all + /// combinations of CRC register values and incoming bytes. + /// + public sealed class Crc32 : IChecksum { + const uint CrcSeed = 0xFFFFFFFF; + + readonly static uint[] CrcTable = new uint[] { + 0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA, 0x076DC419, + 0x706AF48F, 0xE963A535, 0x9E6495A3, 0x0EDB8832, 0x79DCB8A4, + 0xE0D5E91E, 0x97D2D988, 0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, + 0x90BF1D91, 0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE, + 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7, 0x136C9856, + 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC, 0x14015C4F, 0x63066CD9, + 0xFA0F3D63, 0x8D080DF5, 0x3B6E20C8, 0x4C69105E, 0xD56041E4, + 0xA2677172, 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B, + 0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940, 0x32D86CE3, + 0x45DF5C75, 0xDCD60DCF, 0xABD13D59, 0x26D930AC, 0x51DE003A, + 0xC8D75180, 0xBFD06116, 0x21B4F4B5, 0x56B3C423, 0xCFBA9599, + 0xB8BDA50F, 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924, + 0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D, 0x76DC4190, + 0x01DB7106, 0x98D220BC, 0xEFD5102A, 0x71B18589, 0x06B6B51F, + 0x9FBFE4A5, 0xE8B8D433, 0x7807C9A2, 0x0F00F934, 0x9609A88E, + 0xE10E9818, 0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01, + 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E, 0x6C0695ED, + 0x1B01A57B, 0x8208F4C1, 0xF50FC457, 0x65B0D9C6, 0x12B7E950, + 0x8BBEB8EA, 0xFCB9887C, 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, + 0xFBD44C65, 0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2, + 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB, 0x4369E96A, + 0x346ED9FC, 0xAD678846, 0xDA60B8D0, 0x44042D73, 0x33031DE5, + 0xAA0A4C5F, 0xDD0D7CC9, 0x5005713C, 0x270241AA, 0xBE0B1010, + 0xC90C2086, 0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F, + 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4, 0x59B33D17, + 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD, 0xEDB88320, 0x9ABFB3B6, + 0x03B6E20C, 0x74B1D29A, 0xEAD54739, 0x9DD277AF, 0x04DB2615, + 0x73DC1683, 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8, + 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1, 0xF00F9344, + 0x8708A3D2, 0x1E01F268, 0x6906C2FE, 0xF762575D, 0x806567CB, + 0x196C3671, 0x6E6B06E7, 
0xFED41B76, 0x89D32BE0, 0x10DA7A5A, + 0x67DD4ACC, 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5, + 0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252, 0xD1BB67F1, + 0xA6BC5767, 0x3FB506DD, 0x48B2364B, 0xD80D2BDA, 0xAF0A1B4C, + 0x36034AF6, 0x41047A60, 0xDF60EFC3, 0xA867DF55, 0x316E8EEF, + 0x4669BE79, 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236, + 0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F, 0xC5BA3BBE, + 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04, 0xC2D7FFA7, 0xB5D0CF31, + 0x2CD99E8B, 0x5BDEAE1D, 0x9B64C2B0, 0xEC63F226, 0x756AA39C, + 0x026D930A, 0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713, + 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38, 0x92D28E9B, + 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21, 0x86D3D2D4, 0xF1D4E242, + 0x68DDB3F8, 0x1FDA836E, 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, + 0x18B74777, 0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C, + 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45, 0xA00AE278, + 0xD70DD2EE, 0x4E048354, 0x3903B3C2, 0xA7672661, 0xD06016F7, + 0x4969474D, 0x3E6E77DB, 0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, + 0x37D83BF0, 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9, + 0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6, 0xBAD03605, + 0xCDD70693, 0x54DE5729, 0x23D967BF, 0xB3667A2E, 0xC4614AB8, + 0x5D681B02, 0x2A6F2B94, 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, + 0x2D02EF8D + }; + + internal static uint ComputeCrc32(uint oldCrc, byte value) { + return (uint) (Crc32.CrcTable[(oldCrc ^ value) & 0xFF] ^ (oldCrc >> 8)); + } + + /// + /// The crc data checksum so far. + /// + uint crc; + + /// + /// Returns the CRC32 data checksum computed so far. + /// + public long Value { + get { + return (long) crc; + } + set { + crc = (uint) value; + } + } + + /// + /// Resets the CRC32 data checksum as if no update was ever called. + /// + public void Reset() { + crc = 0; + } + + /// + /// Updates the checksum with the int bval. + /// + /// + /// the byte is taken as the lower 8 bits of value + /// + public void Update(int value) { + crc ^= CrcSeed; + crc = CrcTable[(crc ^ value) & 0xFF] ^ (crc >> 8); + crc ^= CrcSeed; + } + + /// + /// Updates the checksum with the bytes taken from the array. + /// + /// + /// buffer an array of bytes + /// + public void Update(byte[] buffer) { + if (buffer == null) { + throw new ArgumentNullException("buffer"); + } + + Update(buffer, 0, buffer.Length); + } + + /// + /// Adds the byte array to the data checksum. + /// + /// + /// The buffer which contains the data + /// + /// + /// The offset in the buffer where the data starts + /// + /// + /// The number of data bytes to update the CRC with. 
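// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the Plupload sources: using the Crc32
// class above.  The table turns the bit-by-bit polynomial division described
// in the header comment into one lookup per byte:
//     crc = CrcTable[(crc ^ b) & 0xFF] ^ (crc >> 8);
// "123456789" is the conventional CRC-32 check string.
// ---------------------------------------------------------------------------
using System.Text;
using Plupload.PngEncoder;

static class Crc32Sketch {
    public static long CheckValue() {
        var crc = new Crc32();
        crc.Update(Encoding.UTF8.GetBytes("123456789"));
        return crc.Value;                    // expected 0xCBF43926
    }
}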
+ /// + public void Update(byte[] buffer, int offset, int count) { + if (buffer == null) { + throw new ArgumentNullException("buffer"); + } + + if (count < 0) { + + throw new ArgumentOutOfRangeException("count"); + } + + if (offset < 0 || offset + count > buffer.Length) { + throw new ArgumentOutOfRangeException("offset"); + } + + crc ^= CrcSeed; + + while (--count >= 0) { + crc = CrcTable[(crc ^ buffer[offset++]) & 0xFF] ^ (crc >> 8); + } + + crc ^= CrcSeed; + } + } +} diff --git a/debian/missing-sources/plupload/csharp/Plupload/PngEncoder/Deflater.cs b/debian/missing-sources/plupload/csharp/Plupload/PngEncoder/Deflater.cs new file mode 100644 index 0000000..f4a2094 --- /dev/null +++ b/debian/missing-sources/plupload/csharp/Plupload/PngEncoder/Deflater.cs @@ -0,0 +1,543 @@ +// Deflater.cs +// +// Copyright (C) 2001 Mike Krueger +// Copyright (C) 2004 John Reilly +// +// This file was translated from java, it was part of the GNU Classpath +// Copyright (C) 2001 Free Software Foundation, Inc. +// +// This program is free software; you can redistribute it and/or +// modify it under the terms of the GNU General Public License +// as published by the Free Software Foundation; either version 2 +// of the License, or (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +// +// Linking this library statically or dynamically with other modules is +// making a combined work based on this library. Thus, the terms and +// conditions of the GNU General Public License cover the whole +// combination. +// +// As a special exception, the copyright holders of this library give you +// permission to link this library with independent modules to produce an +// executable, regardless of the license terms of these independent +// modules, and to copy and distribute the resulting executable under +// terms of your choice, provided that you also meet, for each linked +// independent module, the terms and conditions of the license of that +// module. An independent module is a module which is not derived from +// or based on this library. If you modify this library, you may extend +// this exception to your version of the library, but you are not +// obligated to do so. If you do not wish to do so, delete this +// exception statement from your version. + +using System; + +namespace Plupload.PngEncoder { + + /// + /// This is the Deflater class. The deflater class compresses input + /// with the deflate algorithm described in RFC 1951. It has several + /// compression levels and three different strategies described below. + /// + /// This class is not thread safe. This is inherent in the API, due + /// to the split of deflate and setInput. + /// + /// author of the original java version : Jochen Hoenicke + /// + public class Deflater { + #region Deflater Documentation + /* + * The Deflater can do the following state transitions: + * + * (1) -> INIT_STATE ----> INIT_FINISHING_STATE ---. 
+ * / | (2) (5) | + * / v (5) | + * (3)| SETDICT_STATE ---> SETDICT_FINISHING_STATE |(3) + * \ | (3) | ,--------' + * | | | (3) / + * v v (5) v v + * (1) -> BUSY_STATE ----> FINISHING_STATE + * | (6) + * v + * FINISHED_STATE + * \_____________________________________/ + * | (7) + * v + * CLOSED_STATE + * + * (1) If we should produce a header we start in INIT_STATE, otherwise + * we start in BUSY_STATE. + * (2) A dictionary may be set only when we are in INIT_STATE, then + * we change the state as indicated. + * (3) Whether a dictionary is set or not, on the first call of deflate + * we change to BUSY_STATE. + * (4) -- intentionally left blank -- :) + * (5) FINISHING_STATE is entered, when flush() is called to indicate that + * there is no more INPUT. There are also states indicating, that + * the header wasn't written yet. + * (6) FINISHED_STATE is entered, when everything has been flushed to the + * internal pending output buffer. + * (7) At any time (7) + * + */ + #endregion + #region Public Constants + /// + /// The best and slowest compression level. This tries to find very + /// long and distant string repetitions. + /// + public const int BEST_COMPRESSION = 9; + + /// + /// The worst but fastest compression level. + /// + public const int BEST_SPEED = 1; + + /// + /// The default compression level. + /// + public const int DEFAULT_COMPRESSION = -1; + + /// + /// This level won't compress at all but output uncompressed blocks. + /// + public const int NO_COMPRESSION = 0; + + /// + /// The compression method. This is the only method supported so far. + /// There is no need to use this constant at all. + /// + public const int DEFLATED = 8; + #endregion + #region Local Constants + private const int IS_SETDICT = 0x01; + private const int IS_FLUSHING = 0x04; + private const int IS_FINISHING = 0x08; + + private const int INIT_STATE = 0x00; + private const int SETDICT_STATE = 0x01; + // private static int INIT_FINISHING_STATE = 0x08; + // private static int SETDICT_FINISHING_STATE = 0x09; + private const int BUSY_STATE = 0x10; + private const int FLUSHING_STATE = 0x14; + private const int FINISHING_STATE = 0x1c; + private const int FINISHED_STATE = 0x1e; + private const int CLOSED_STATE = 0x7f; + #endregion + #region Constructors + /// + /// Creates a new deflater with default compression level. + /// + public Deflater() + : this(DEFAULT_COMPRESSION, false) { + + } + + /// + /// Creates a new deflater with given compression level. + /// + /// + /// the compression level, a value between NO_COMPRESSION + /// and BEST_COMPRESSION, or DEFAULT_COMPRESSION. + /// + /// if lvl is out of range. + public Deflater(int level) + : this(level, false) { + + } + + /// + /// Creates a new deflater with given compression level. + /// + /// + /// the compression level, a value between NO_COMPRESSION + /// and BEST_COMPRESSION. + /// + /// + /// true, if we should suppress the Zlib/RFC1950 header at the + /// beginning and the adler checksum at the end of the output. This is + /// useful for the GZIP/PKZIP formats. + /// + /// if lvl is out of range. 
+ public Deflater(int level, bool noZlibHeaderOrFooter) { + if (level == DEFAULT_COMPRESSION) { + level = 6; + } else if (level < NO_COMPRESSION || level > BEST_COMPRESSION) { + throw new ArgumentOutOfRangeException("level"); + } + + pending = new DeflaterPending(); + engine = new DeflaterEngine(pending); + this.noZlibHeaderOrFooter = noZlibHeaderOrFooter; + SetStrategy(DeflateStrategy.Default); + SetLevel(level); + Reset(); + } + #endregion + + /// + /// Resets the deflater. The deflater acts afterwards as if it was + /// just created with the same compression level and strategy as it + /// had before. + /// + public void Reset() { + state = (noZlibHeaderOrFooter ? BUSY_STATE : INIT_STATE); + totalOut = 0; + pending.Reset(); + engine.Reset(); + } + + /// + /// Gets the current adler checksum of the data that was processed so far. + /// + public int Adler { + get { + return engine.Adler; + } + } + + /// + /// Gets the number of input bytes processed so far. + /// + public long TotalIn { + get { + return engine.TotalIn; + } + } + + /// + /// Gets the number of output bytes so far. + /// + public long TotalOut { + get { + return totalOut; + } + } + + /// + /// Flushes the current input block. Further calls to deflate() will + /// produce enough output to inflate everything in the current input + /// block. This is not part of Sun's JDK so I have made it package + /// private. It is used by DeflaterOutputStream to implement + /// flush(). + /// + public void Flush() { + state |= IS_FLUSHING; + } + + /// + /// Finishes the deflater with the current input block. It is an error + /// to give more input after this method was called. This method must + /// be called to force all bytes to be flushed. + /// + public void Finish() { + state |= (IS_FLUSHING | IS_FINISHING); + } + + /// + /// Returns true if the stream was finished and no more output bytes + /// are available. + /// + public bool IsFinished { + get { + return (state == FINISHED_STATE) && pending.IsFlushed; + } + } + + /// + /// Returns true, if the input buffer is empty. + /// You should then call setInput(). + /// NOTE: This method can also return true when the stream + /// was finished. + /// + public bool IsNeedingInput { + get { + return engine.NeedsInput(); + } + } + + /// + /// Sets the data which should be compressed next. This should be only + /// called when needsInput indicates that more input is needed. + /// If you call setInput when needsInput() returns false, the + /// previous input that is still pending will be thrown away. + /// The given byte array should not be changed, before needsInput() returns + /// true again. + /// This call is equivalent to setInput(input, 0, input.length). + /// + /// + /// the buffer containing the input data. + /// + /// + /// if the buffer was finished() or ended(). + /// + public void SetInput(byte[] input) { + SetInput(input, 0, input.Length); + } + + /// + /// Sets the data which should be compressed next. This should be + /// only called when needsInput indicates that more input is needed. + /// The given byte array should not be changed, before needsInput() returns + /// true again. + /// + /// + /// the buffer containing the input data. + /// + /// + /// the start of the data. + /// + /// + /// the number of data bytes of input. + /// + /// + /// if the buffer was Finish()ed or if previous input is still pending. 
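// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the Plupload sources: the call sequence
// implied by the state diagram above -- provide input, call Finish(), then
// drain Deflate() until IsFinished.  Buffer size and stream handling here are
// arbitrary choices, not part of the library.
// ---------------------------------------------------------------------------
using System.IO;
using Plupload.PngEncoder;

static class DeflaterUsageSketch {
    public static byte[] Compress(byte[] input) {
        var deflater = new Deflater(Deflater.DEFAULT_COMPRESSION, false); // emit zlib header/footer
        deflater.SetInput(input);
        deflater.Finish();                           // no further input will follow

        using (var output = new MemoryStream()) {
            var buffer = new byte[4096];
            while (!deflater.IsFinished) {
                int produced = deflater.Deflate(buffer);
                output.Write(buffer, 0, produced);
            }
            return output.ToArray();
        }
    }
}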
+ /// + public void SetInput(byte[] input, int offset, int count) { + if ((state & IS_FINISHING) != 0) { + throw new InvalidOperationException("Finish() already called"); + } + engine.SetInput(input, offset, count); + } + + /// + /// Sets the compression level. There is no guarantee of the exact + /// position of the change, but if you call this when needsInput is + /// true the change of compression level will occur somewhere near + /// before the end of the so far given input. + /// + /// + /// the new compression level. + /// + public void SetLevel(int level) { + if (level == DEFAULT_COMPRESSION) { + level = 6; + } else if (level < NO_COMPRESSION || level > BEST_COMPRESSION) { + throw new ArgumentOutOfRangeException("level"); + } + + if (this.level != level) { + this.level = level; + engine.SetLevel(level); + } + } + + /// + /// Get current compression level + /// + /// Returns the current compression level + public int GetLevel() { + return level; + } + + /// + /// Sets the compression strategy. Strategy is one of + /// DEFAULT_STRATEGY, HUFFMAN_ONLY and FILTERED. For the exact + /// position where the strategy is changed, the same as for + /// SetLevel() applies. + /// + /// + /// The new compression strategy. + /// + public void SetStrategy(DeflateStrategy strategy) { + engine.Strategy = strategy; + } + + /// + /// Deflates the current input block with to the given array. + /// + /// + /// The buffer where compressed data is stored + /// + /// + /// The number of compressed bytes added to the output, or 0 if either + /// IsNeedingInput() or IsFinished returns true or length is zero. + /// + public int Deflate(byte[] output) { + return Deflate(output, 0, output.Length); + } + + /// + /// Deflates the current input block to the given array. + /// + /// + /// Buffer to store the compressed data. + /// + /// + /// Offset into the output array. + /// + /// + /// The maximum number of bytes that may be stored. + /// + /// + /// The number of compressed bytes added to the output, or 0 if either + /// needsInput() or finished() returns true or length is zero. + /// + /// + /// If Finish() was previously called. + /// + /// + /// If offset or length don't match the array length. + /// + public int Deflate(byte[] output, int offset, int length) { + int origLength = length; + + if (state == CLOSED_STATE) { + throw new InvalidOperationException("Deflater closed"); + } + + if (state < BUSY_STATE) { + // output header + int header = (DEFLATED + + ((DeflaterConstants.MAX_WBITS - 8) << 4)) << 8; + int level_flags = (level - 1) >> 1; + if (level_flags < 0 || level_flags > 3) { + level_flags = 3; + } + header |= level_flags << 6; + if ((state & IS_SETDICT) != 0) { + // Dictionary was set + header |= DeflaterConstants.PRESET_DICT; + } + header += 31 - (header % 31); + + pending.WriteShortMSB(header); + if ((state & IS_SETDICT) != 0) { + int chksum = engine.Adler; + engine.ResetAdler(); + pending.WriteShortMSB(chksum >> 16); + pending.WriteShortMSB(chksum & 0xffff); + } + + state = BUSY_STATE | (state & (IS_FLUSHING | IS_FINISHING)); + } + + for (; ; ) { + int count = pending.Flush(output, offset, length); + offset += count; + totalOut += count; + length -= count; + + if (length == 0 || state == FINISHED_STATE) { + break; + } + + if (!engine.Deflate((state & IS_FLUSHING) != 0, (state & IS_FINISHING) != 0)) { + if (state == BUSY_STATE) { + // We need more input now + return origLength - length; + } else if (state == FLUSHING_STATE) { + if (level != NO_COMPRESSION) { + /* We have to supply some lookahead. 
8 bit lookahead + * is needed by the zlib inflater, and we must fill + * the next byte, so that all bits are flushed. + */ + int neededbits = 8 + ((-pending.BitCount) & 7); + while (neededbits > 0) { + /* write a static tree block consisting solely of + * an EOF: + */ + pending.WriteBits(2, 10); + neededbits -= 10; + } + } + state = BUSY_STATE; + } else if (state == FINISHING_STATE) { + pending.AlignToByte(); + + // Compressed data is complete. Write footer information if required. + if (!noZlibHeaderOrFooter) { + int adler = engine.Adler; + pending.WriteShortMSB(adler >> 16); + pending.WriteShortMSB(adler & 0xffff); + } + state = FINISHED_STATE; + } + } + } + return origLength - length; + } + + /// + /// Sets the dictionary which should be used in the deflate process. + /// This call is equivalent to setDictionary(dict, 0, dict.Length). + /// + /// + /// the dictionary. + /// + /// + /// if SetInput () or Deflate () were already called or another dictionary was already set. + /// + public void SetDictionary(byte[] dictionary) { + SetDictionary(dictionary, 0, dictionary.Length); + } + + /// + /// Sets the dictionary which should be used in the deflate process. + /// The dictionary is a byte array containing strings that are + /// likely to occur in the data which should be compressed. The + /// dictionary is not stored in the compressed output, only a + /// checksum. To decompress the output you need to supply the same + /// dictionary again. + /// + /// + /// The dictionary data + /// + /// + /// The index where dictionary information commences. + /// + /// + /// The number of bytes in the dictionary. + /// + /// + /// If SetInput () or Deflate() were already called or another dictionary was already set. + /// + public void SetDictionary(byte[] dictionary, int index, int count) { + if (state != INIT_STATE) { + throw new InvalidOperationException(); + } + + state = SETDICT_STATE; + engine.SetDictionary(dictionary, index, count); + } + + #region Instance Fields + /// + /// Compression level. + /// + int level; + + /// + /// If true no Zlib/RFC1950 headers or footers are generated + /// + bool noZlibHeaderOrFooter; + + /// + /// The current state. + /// + int state; + + /// + /// The total bytes of output written. + /// + long totalOut; + + /// + /// The pending output. + /// + DeflaterPending pending; + + /// + /// The deflater engine. + /// + DeflaterEngine engine; + #endregion + } +} diff --git a/debian/missing-sources/plupload/csharp/Plupload/PngEncoder/DeflaterConstants.cs b/debian/missing-sources/plupload/csharp/Plupload/PngEncoder/DeflaterConstants.cs new file mode 100644 index 0000000..46587b6 --- /dev/null +++ b/debian/missing-sources/plupload/csharp/Plupload/PngEncoder/DeflaterConstants.cs @@ -0,0 +1,184 @@ +// DeflaterConstants.cs +// +// Copyright (C) 2001 Mike Krueger +// Copyright (C) 2004 John Reilly +// +// This file was translated from java, it was part of the GNU Classpath +// Copyright (C) 2001 Free Software Foundation, Inc. +// +// This program is free software; you can redistribute it and/or +// modify it under the terms of the GNU General Public License +// as published by the Free Software Foundation; either version 2 +// of the License, or (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
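// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the Plupload sources: the RFC 1950 header
// that Deflate() above writes before the first block, re-derived so the
// constants can be checked by hand.  For the default level 6 with no preset
// dictionary:
//   (8 + ((15 - 8) << 4)) << 8 = 0x7800;  |= ((6 - 1) >> 1) << 6 -> 0x7880;
//   += 31 - (0x7880 % 31)                 -> 0x789C, i.e. bytes 0x78 0x9C.
// ---------------------------------------------------------------------------
static class ZlibHeaderSketch {
    public static byte[] HeaderFor(int level) {
        int header = (8 /* DEFLATED */ + ((15 /* MAX_WBITS */ - 8) << 4)) << 8;
        int levelFlags = (level - 1) >> 1;           // FLEVEL field
        if (levelFlags < 0 || levelFlags > 3) levelFlags = 3;
        header |= levelFlags << 6;                   // no FDICT bit in this sketch
        header += 31 - (header % 31);                // FCHECK: round up to a multiple of 31
        return new byte[] { (byte)(header >> 8), (byte)(header & 0xFF) };
    }
}
// HeaderFor(6) == { 0x78, 0x9C }.  The footer written in FINISHING_STATE is
// the Adler-32 of the uncompressed input, most-significant byte first.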
+// +// You should have received a copy of the GNU General Public License +// along with this program; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +// +// Linking this library statically or dynamically with other modules is +// making a combined work based on this library. Thus, the terms and +// conditions of the GNU General Public License cover the whole +// combination. +// +// As a special exception, the copyright holders of this library give you +// permission to link this library with independent modules to produce an +// executable, regardless of the license terms of these independent +// modules, and to copy and distribute the resulting executable under +// terms of your choice, provided that you also meet, for each linked +// independent module, the terms and conditions of the license of that +// module. An independent module is a module which is not derived from +// or based on this library. If you modify this library, you may extend +// this exception to your version of the library, but you are not +// obligated to do so. If you do not wish to do so, delete this +// exception statement from your version. + +using System; + +namespace Plupload.PngEncoder { + + /// + /// This class contains constants used for deflation. + /// + public class DeflaterConstants { + /// + /// Set to true to enable debugging + /// + public const bool DEBUGGING = false; + + /// + /// Written to Zip file to identify a stored block + /// + public const int STORED_BLOCK = 0; + + /// + /// Identifies static tree in Zip file + /// + public const int STATIC_TREES = 1; + + /// + /// Identifies dynamic tree in Zip file + /// + public const int DYN_TREES = 2; + + /// + /// Header flag indicating a preset dictionary for deflation + /// + public const int PRESET_DICT = 0x20; + + /// + /// Sets internal buffer sizes for Huffman encoding + /// + public const int DEFAULT_MEM_LEVEL = 8; + + /// + /// Internal compression engine constant + /// + public const int MAX_MATCH = 258; + + /// + /// Internal compression engine constant + /// + public const int MIN_MATCH = 3; + + /// + /// Internal compression engine constant + /// + public const int MAX_WBITS = 15; + + /// + /// Internal compression engine constant + /// + public const int WSIZE = 1 << MAX_WBITS; + + /// + /// Internal compression engine constant + /// + public const int WMASK = WSIZE - 1; + + /// + /// Internal compression engine constant + /// + public const int HASH_BITS = DEFAULT_MEM_LEVEL + 7; + + /// + /// Internal compression engine constant + /// + public const int HASH_SIZE = 1 << HASH_BITS; + + /// + /// Internal compression engine constant + /// + public const int HASH_MASK = HASH_SIZE - 1; + + /// + /// Internal compression engine constant + /// + public const int HASH_SHIFT = (HASH_BITS + MIN_MATCH - 1) / MIN_MATCH; + + /// + /// Internal compression engine constant + /// + public const int MIN_LOOKAHEAD = MAX_MATCH + MIN_MATCH + 1; + + /// + /// Internal compression engine constant + /// + public const int MAX_DIST = WSIZE - MIN_LOOKAHEAD; + + /// + /// Internal compression engine constant + /// + public const int PENDING_BUF_SIZE = 1 << (DEFAULT_MEM_LEVEL + 8); + + /// + /// Internal compression engine constant + /// + public static int MAX_BLOCK_SIZE = Math.Min(65535, PENDING_BUF_SIZE - 5); + + /// + /// Internal compression engine constant + /// + public const int DEFLATE_STORED = 0; + + /// + /// Internal compression engine constant + /// + public const int DEFLATE_FAST = 
1; + + /// + /// Internal compression engine constant + /// + public const int DEFLATE_SLOW = 2; + + /// + /// Internal compression engine constant + /// + public static int[] GOOD_LENGTH = { 0, 4, 4, 4, 4, 8, 8, 8, 32, 32 }; + + /// + /// Internal compression engine constant + /// + public static int[] MAX_LAZY = { 0, 4, 5, 6, 4, 16, 16, 32, 128, 258 }; + + /// + /// Internal compression engine constant + /// + public static int[] NICE_LENGTH = { 0, 8, 16, 32, 16, 32, 128, 128, 258, 258 }; + + /// + /// Internal compression engine constant + /// + public static int[] MAX_CHAIN = { 0, 4, 8, 32, 16, 32, 128, 256, 1024, 4096 }; + + /// + /// Internal compression engine constant + /// + public static int[] COMPR_FUNC = { 0, 1, 1, 1, 1, 2, 2, 2, 2, 2 }; + + } +} diff --git a/debian/missing-sources/plupload/csharp/Plupload/PngEncoder/DeflaterEngine.cs b/debian/missing-sources/plupload/csharp/Plupload/PngEncoder/DeflaterEngine.cs new file mode 100644 index 0000000..7b56b59 --- /dev/null +++ b/debian/missing-sources/plupload/csharp/Plupload/PngEncoder/DeflaterEngine.cs @@ -0,0 +1,832 @@ +// DeflaterEngine.cs +// +// Copyright (C) 2001 Mike Krueger +// Copyright (C) 2004 John Reilly +// +// This file was translated from java, it was part of the GNU Classpath +// Copyright (C) 2001 Free Software Foundation, Inc. +// +// This program is free software; you can redistribute it and/or +// modify it under the terms of the GNU General Public License +// as published by the Free Software Foundation; either version 2 +// of the License, or (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +// +// Linking this library statically or dynamically with other modules is +// making a combined work based on this library. Thus, the terms and +// conditions of the GNU General Public License cover the whole +// combination. +// +// As a special exception, the copyright holders of this library give you +// permission to link this library with independent modules to produce an +// executable, regardless of the license terms of these independent +// modules, and to copy and distribute the resulting executable under +// terms of your choice, provided that you also meet, for each linked +// independent module, the terms and conditions of the license of that +// module. An independent module is a module which is not derived from +// or based on this library. If you modify this library, you may extend +// this exception to your version of the library, but you are not +// obligated to do so. If you do not wish to do so, delete this +// exception statement from your version. + +using System; + +namespace Plupload.PngEncoder { + + /// + /// Strategies for deflater + /// + public enum DeflateStrategy { + /// + /// The default strategy + /// + Default = 0, + + /// + /// This strategy will only allow longer string repetitions. It is + /// useful for random data with a small character set. + /// + Filtered = 1, + + + /// + /// This strategy will not look for string repetitions at all. 
It + /// only encodes with Huffman trees (which means, that more common + /// characters get a smaller encoding. + /// + HuffmanOnly = 2 + } + + // DEFLATE ALGORITHM: + // + // The uncompressed stream is inserted into the window array. When + // the window array is full the first half is thrown away and the + // second half is copied to the beginning. + // + // The head array is a hash table. Three characters build a hash value + // and they the value points to the corresponding index in window of + // the last string with this hash. The prev array implements a + // linked list of matches with the same hash: prev[index & WMASK] points + // to the previous index with the same hash. + // + + + /// + /// Low level compression engine for deflate algorithm which uses a 32K sliding window + /// with secondary compression from Huffman/Shannon-Fano codes. + /// + public class DeflaterEngine : DeflaterConstants { + #region Constants + const int TooFar = 4096; + #endregion + + #region Constructors + /// + /// Construct instance with pending buffer + /// + /// + /// Pending buffer to use + /// > + public DeflaterEngine(DeflaterPending pending) { + this.pending = pending; + huffman = new DeflaterHuffman(pending); + adler = new Adler32(); + + window = new byte[2 * WSIZE]; + head = new short[HASH_SIZE]; + prev = new short[WSIZE]; + + // We start at index 1, to avoid an implementation deficiency, that + // we cannot build a repeat pattern at index 0. + blockStart = strstart = 1; + } + + #endregion + + /// + /// Deflate drives actual compression of data + /// + /// True to flush input buffers + /// Finish deflation with the current input. + /// Returns true if progress has been made. + public bool Deflate(bool flush, bool finish) { + bool progress; + do { + FillWindow(); + bool canFlush = flush && (inputOff == inputEnd); + +#if DebugDeflation + if (DeflaterConstants.DEBUGGING) { + Console.WriteLine("window: [" + blockStart + "," + strstart + "," + + lookahead + "], " + compressionFunction + "," + canFlush); + } +#endif + switch (compressionFunction) { + case DEFLATE_STORED: + progress = DeflateStored(canFlush, finish); + break; + case DEFLATE_FAST: + progress = DeflateFast(canFlush, finish); + break; + case DEFLATE_SLOW: + progress = DeflateSlow(canFlush, finish); + break; + default: + throw new InvalidOperationException("unknown compressionFunction"); + } + } while (pending.IsFlushed && progress); // repeat while we have no pending output and progress was made + return progress; + } + + /// + /// Sets input data to be deflated. Should only be called when NeedsInput() + /// returns true + /// + /// The buffer containing input data. + /// The offset of the first byte of data. + /// The number of bytes of data to use as input. + public void SetInput(byte[] buffer, int offset, int count) { + if (buffer == null) { + throw new ArgumentNullException("buffer"); + } + + if (offset < 0) { + throw new ArgumentOutOfRangeException("offset"); + } + + if (count < 0) { + throw new ArgumentOutOfRangeException("count"); + } + + if (inputOff < inputEnd) { + throw new InvalidOperationException("Old input was not completely processed"); + } + + int end = offset + count; + + /* We want to throw an ArrayIndexOutOfBoundsException early. The + * check is very tricky: it also handles integer wrap around. + */ + if ((offset > end) || (end > buffer.Length)) { + throw new ArgumentOutOfRangeException("count"); + } + + inputBuf = buffer; + inputOff = offset; + inputEnd = end; + } + + /// + /// Determines if more input is needed. 
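// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the Plupload sources: how the per-level
// arrays in DeflaterConstants drive the engine.  COMPR_FUNC selects which of
// DeflateStored/DeflateFast/DeflateSlow the Deflate() dispatch above calls,
// and the other arrays bound the match search.  Reading the tables:
//   level 1 -> fast, good 4,  lazy 4,   nice 8,   chain 4
//   level 6 -> slow, good 8,  lazy 16,  nice 128, chain 128   (default)
//   level 9 -> slow, good 32, lazy 258, nice 258, chain 4096
// The helper below only packages that lookup; its name is hypothetical.
// ---------------------------------------------------------------------------
using Plupload.PngEncoder;

static class LevelTuningSketch {
    public static string Describe(int level) {
        int fn = DeflaterConstants.COMPR_FUNC[level];
        string function = fn == DeflaterConstants.DEFLATE_STORED ? "stored"
                        : fn == DeflaterConstants.DEFLATE_FAST ? "fast" : "slow";
        return string.Format("level {0}: {1}, good {2}, lazy {3}, nice {4}, chain {5}",
            level, function,
            DeflaterConstants.GOOD_LENGTH[level], DeflaterConstants.MAX_LAZY[level],
            DeflaterConstants.NICE_LENGTH[level], DeflaterConstants.MAX_CHAIN[level]);
    }
}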
+ /// + /// Return true if input is needed via SetInput + public bool NeedsInput() { + return (inputEnd == inputOff); + } + + /// + /// Set compression dictionary + /// + /// The buffer containing the dictionary data + /// The offset in the buffer for the first byte of data + /// The length of the dictionary data. + public void SetDictionary(byte[] buffer, int offset, int length) { +#if DebugDeflation + if (DeflaterConstants.DEBUGGING && (strstart != 1) ) + { + throw new InvalidOperationException("strstart not 1"); + } +#endif + adler.Update(buffer, offset, length); + if (length < MIN_MATCH) { + return; + } + + if (length > MAX_DIST) { + offset += length - MAX_DIST; + length = MAX_DIST; + } + + System.Array.Copy(buffer, offset, window, strstart, length); + + UpdateHash(); + --length; + while (--length > 0) { + InsertString(); + strstart++; + } + strstart += 2; + blockStart = strstart; + } + + /// + /// Reset internal state + /// + public void Reset() { + huffman.Reset(); + adler.Reset(); + blockStart = strstart = 1; + lookahead = 0; + totalIn = 0; + prevAvailable = false; + matchLen = MIN_MATCH - 1; + + for (int i = 0; i < HASH_SIZE; i++) { + head[i] = 0; + } + + for (int i = 0; i < WSIZE; i++) { + prev[i] = 0; + } + } + + /// + /// Reset Adler checksum + /// + public void ResetAdler() { + adler.Reset(); + } + + /// + /// Get current value of Adler checksum + /// + public int Adler { + get { + return unchecked((int) adler.Value); + } + } + + /// + /// Total data processed + /// + public long TotalIn { + get { + return totalIn; + } + } + + /// + /// Get/set the deflate strategy + /// + public DeflateStrategy Strategy { + get { + return strategy; + } + set { + strategy = value; + } + } + + /// + /// Set the deflate level (0-9) + /// + /// The value to set the level to. + public void SetLevel(int level) { + if ((level < 0) || (level > 9)) { + throw new ArgumentOutOfRangeException("level"); + } + + goodLength = DeflaterConstants.GOOD_LENGTH[level]; + max_lazy = DeflaterConstants.MAX_LAZY[level]; + niceLength = DeflaterConstants.NICE_LENGTH[level]; + max_chain = DeflaterConstants.MAX_CHAIN[level]; + + if (DeflaterConstants.COMPR_FUNC[level] != compressionFunction) { + +#if DebugDeflation + if (DeflaterConstants.DEBUGGING) { + Console.WriteLine("Change from " + compressionFunction + " to " + + DeflaterConstants.COMPR_FUNC[level]); + } +#endif + switch (compressionFunction) { + case DEFLATE_STORED: + if (strstart > blockStart) { + huffman.FlushStoredBlock(window, blockStart, + strstart - blockStart, false); + blockStart = strstart; + } + UpdateHash(); + break; + + case DEFLATE_FAST: + if (strstart > blockStart) { + huffman.FlushBlock(window, blockStart, strstart - blockStart, + false); + blockStart = strstart; + } + break; + + case DEFLATE_SLOW: + if (prevAvailable) { + huffman.TallyLit(window[strstart - 1] & 0xff); + } + if (strstart > blockStart) { + huffman.FlushBlock(window, blockStart, strstart - blockStart, false); + blockStart = strstart; + } + prevAvailable = false; + matchLen = MIN_MATCH - 1; + break; + } + compressionFunction = COMPR_FUNC[level]; + } + } + + /// + /// Fill the window + /// + public void FillWindow() { + /* If the window is almost full and there is insufficient lookahead, + * move the upper half to the lower one to make room in the upper half. 
+ */ + if (strstart >= WSIZE + MAX_DIST) { + SlideWindow(); + } + + /* If there is not enough lookahead, but still some input left, + * read in the input + */ + while (lookahead < DeflaterConstants.MIN_LOOKAHEAD && inputOff < inputEnd) { + int more = 2 * WSIZE - lookahead - strstart; + + if (more > inputEnd - inputOff) { + more = inputEnd - inputOff; + } + + System.Array.Copy(inputBuf, inputOff, window, strstart + lookahead, more); + adler.Update(inputBuf, inputOff, more); + + inputOff += more; + totalIn += more; + lookahead += more; + } + + if (lookahead >= MIN_MATCH) { + UpdateHash(); + } + } + + void UpdateHash() { + /* + if (DEBUGGING) { + Console.WriteLine("updateHash: "+strstart); + } + */ + ins_h = (window[strstart] << HASH_SHIFT) ^ window[strstart + 1]; + } + + /// + /// Inserts the current string in the head hash and returns the previous + /// value for this hash. + /// + /// The previous hash value + int InsertString() { + short match; + int hash = ((ins_h << HASH_SHIFT) ^ window[strstart + (MIN_MATCH - 1)]) & HASH_MASK; + +#if DebugDeflation + if (DeflaterConstants.DEBUGGING) + { + if (hash != (((window[strstart] << (2*HASH_SHIFT)) ^ + (window[strstart + 1] << HASH_SHIFT) ^ + (window[strstart + 2])) & HASH_MASK)) { + throw new SharpZipBaseException("hash inconsistent: " + hash + "/" + +window[strstart] + "," + +window[strstart + 1] + "," + +window[strstart + 2] + "," + HASH_SHIFT); + } + } +#endif + prev[strstart & WMASK] = match = head[hash]; + head[hash] = unchecked((short) strstart); + ins_h = hash; + return match & 0xffff; + } + + void SlideWindow() { + Array.Copy(window, WSIZE, window, 0, WSIZE); + matchStart -= WSIZE; + strstart -= WSIZE; + blockStart -= WSIZE; + + // Slide the hash table (could be avoided with 32 bit values + // at the expense of memory usage). + for (int i = 0; i < HASH_SIZE; ++i) { + int m = head[i] & 0xffff; + head[i] = (short) (m >= WSIZE ? (m - WSIZE) : 0); + } + + // Slide the prev table. + for (int i = 0; i < WSIZE; i++) { + int m = prev[i] & 0xffff; + prev[i] = (short) (m >= WSIZE ? (m - WSIZE) : 0); + } + } + + /// + /// Find the best (longest) string in the window matching the + /// string starting at strstart. + /// + /// Preconditions: + /// + /// strstart + MAX_MATCH <= window.length. + /// + /// + /// True if a match greater than the minimum length is found + bool FindLongestMatch(int curMatch) { + int chainLength = this.max_chain; + int niceLength = this.niceLength; + short[] prev = this.prev; + int scan = this.strstart; + int match; + int best_end = this.strstart + matchLen; + int best_len = Math.Max(matchLen, MIN_MATCH - 1); + + int limit = Math.Max(strstart - MAX_DIST, 0); + + int strend = strstart + MAX_MATCH - 1; + byte scan_end1 = window[best_end - 1]; + byte scan_end = window[best_end]; + + // Do not waste too much time if we already have a good match: + if (best_len >= this.goodLength) { + chainLength >>= 2; + } + + /* Do not look for matches beyond the end of the input. This is necessary + * to make deflate deterministic. 
+ */ + if (niceLength > lookahead) { + niceLength = lookahead; + } + +#if DebugDeflation + + if (DeflaterConstants.DEBUGGING && (strstart > 2 * WSIZE - MIN_LOOKAHEAD)) + { + throw new InvalidOperationException("need lookahead"); + } +#endif + + do { + +#if DebugDeflation + + if (DeflaterConstants.DEBUGGING && (curMatch >= strstart) ) + { + throw new InvalidOperationException("no future"); + } +#endif + if (window[curMatch + best_len] != scan_end || + window[curMatch + best_len - 1] != scan_end1 || + window[curMatch] != window[scan] || + window[curMatch + 1] != window[scan + 1]) { + continue; + } + + match = curMatch + 2; + scan += 2; + + /* We check for insufficient lookahead only every 8th comparison; + * the 256th check will be made at strstart + 258. + */ + while ( + window[++scan] == window[++match] && + window[++scan] == window[++match] && + window[++scan] == window[++match] && + window[++scan] == window[++match] && + window[++scan] == window[++match] && + window[++scan] == window[++match] && + window[++scan] == window[++match] && + window[++scan] == window[++match] && + (scan < strend)) { + // Do nothing + } + + if (scan > best_end) { +#if DebugDeflation + if (DeflaterConstants.DEBUGGING && (ins_h == 0) ) + Console.Error.WriteLine("Found match: " + curMatch + "-" + (scan - strstart)); +#endif + matchStart = curMatch; + best_end = scan; + best_len = scan - strstart; + + if (best_len >= niceLength) { + break; + } + + scan_end1 = window[best_end - 1]; + scan_end = window[best_end]; + } + scan = strstart; + } while ((curMatch = (prev[curMatch & WMASK] & 0xffff)) > limit && --chainLength != 0); + + matchLen = Math.Min(best_len, lookahead); + return matchLen >= MIN_MATCH; + } + + bool DeflateStored(bool flush, bool finish) { + if (!flush && (lookahead == 0)) { + return false; + } + + strstart += lookahead; + lookahead = 0; + + int storedLength = strstart - blockStart; + + if ((storedLength >= DeflaterConstants.MAX_BLOCK_SIZE) || // Block is full + (blockStart < WSIZE && storedLength >= MAX_DIST) || // Block may move out of window + flush) { + bool lastBlock = finish; + if (storedLength > DeflaterConstants.MAX_BLOCK_SIZE) { + storedLength = DeflaterConstants.MAX_BLOCK_SIZE; + lastBlock = false; + } + +#if DebugDeflation + if (DeflaterConstants.DEBUGGING) + { + Console.WriteLine("storedBlock[" + storedLength + "," + lastBlock + "]"); + } +#endif + + huffman.FlushStoredBlock(window, blockStart, storedLength, lastBlock); + blockStart += storedLength; + return !lastBlock; + } + return true; + } + + bool DeflateFast(bool flush, bool finish) { + if (lookahead < MIN_LOOKAHEAD && !flush) { + return false; + } + + while (lookahead >= MIN_LOOKAHEAD || flush) { + if (lookahead == 0) { + // We are flushing everything + huffman.FlushBlock(window, blockStart, strstart - blockStart, finish); + blockStart = strstart; + return false; + } + + if (strstart > 2 * WSIZE - MIN_LOOKAHEAD) { + /* slide window, as FindLongestMatch needs this. + * This should only happen when flushing and the window + * is almost full. 
+ */ + SlideWindow(); + } + + int hashHead; + if (lookahead >= MIN_MATCH && + (hashHead = InsertString()) != 0 && + strategy != DeflateStrategy.HuffmanOnly && + strstart - hashHead <= MAX_DIST && + FindLongestMatch(hashHead)) { + // longestMatch sets matchStart and matchLen +#if DebugDeflation + if (DeflaterConstants.DEBUGGING) + { + for (int i = 0 ; i < matchLen; i++) { + if (window[strstart + i] != window[matchStart + i]) { + throw new SharpZipBaseException("Match failure"); + } + } + } +#endif + + bool full = huffman.TallyDist(strstart - matchStart, matchLen); + + lookahead -= matchLen; + if (matchLen <= max_lazy && lookahead >= MIN_MATCH) { + while (--matchLen > 0) { + ++strstart; + InsertString(); + } + ++strstart; + } else { + strstart += matchLen; + if (lookahead >= MIN_MATCH - 1) { + UpdateHash(); + } + } + matchLen = MIN_MATCH - 1; + if (!full) { + continue; + } + } else { + // No match found + huffman.TallyLit(window[strstart] & 0xff); + ++strstart; + --lookahead; + } + + if (huffman.IsFull()) { + bool lastBlock = finish && (lookahead == 0); + huffman.FlushBlock(window, blockStart, strstart - blockStart, lastBlock); + blockStart = strstart; + return !lastBlock; + } + } + return true; + } + + bool DeflateSlow(bool flush, bool finish) { + if (lookahead < MIN_LOOKAHEAD && !flush) { + return false; + } + + while (lookahead >= MIN_LOOKAHEAD || flush) { + if (lookahead == 0) { + if (prevAvailable) { + huffman.TallyLit(window[strstart - 1] & 0xff); + } + prevAvailable = false; + + // We are flushing everything +#if DebugDeflation + if (DeflaterConstants.DEBUGGING && !flush) + { + throw new SharpZipBaseException("Not flushing, but no lookahead"); + } +#endif + huffman.FlushBlock(window, blockStart, strstart - blockStart, + finish); + blockStart = strstart; + return false; + } + + if (strstart >= 2 * WSIZE - MIN_LOOKAHEAD) { + /* slide window, as FindLongestMatch needs this. + * This should only happen when flushing and the window + * is almost full. 
+ */ + SlideWindow(); + } + + int prevMatch = matchStart; + int prevLen = matchLen; + if (lookahead >= MIN_MATCH) { + + int hashHead = InsertString(); + + if (strategy != DeflateStrategy.HuffmanOnly && + hashHead != 0 && + strstart - hashHead <= MAX_DIST && + FindLongestMatch(hashHead)) { + + // longestMatch sets matchStart and matchLen + + // Discard match if too small and too far away + if (matchLen <= 5 && (strategy == DeflateStrategy.Filtered || (matchLen == MIN_MATCH && strstart - matchStart > TooFar))) { + matchLen = MIN_MATCH - 1; + } + } + } + + // previous match was better + if ((prevLen >= MIN_MATCH) && (matchLen <= prevLen)) { +#if DebugDeflation + if (DeflaterConstants.DEBUGGING) + { + for (int i = 0 ; i < matchLen; i++) { + if (window[strstart-1+i] != window[prevMatch + i]) + throw new SharpZipBaseException(); + } + } +#endif + huffman.TallyDist(strstart - 1 - prevMatch, prevLen); + prevLen -= 2; + do { + strstart++; + lookahead--; + if (lookahead >= MIN_MATCH) { + InsertString(); + } + } while (--prevLen > 0); + + strstart++; + lookahead--; + prevAvailable = false; + matchLen = MIN_MATCH - 1; + } else { + if (prevAvailable) { + huffman.TallyLit(window[strstart - 1] & 0xff); + } + prevAvailable = true; + strstart++; + lookahead--; + } + + if (huffman.IsFull()) { + int len = strstart - blockStart; + if (prevAvailable) { + len--; + } + bool lastBlock = (finish && (lookahead == 0) && !prevAvailable); + huffman.FlushBlock(window, blockStart, len, lastBlock); + blockStart += len; + return !lastBlock; + } + } + return true; + } + + #region Instance Fields + + // Hash index of string to be inserted + int ins_h; + + /// + /// Hashtable, hashing three characters to an index for window, so + /// that window[index]..window[index+2] have this hash code. + /// Note that the array should really be unsigned short, so you need + /// to and the values with 0xffff. + /// + short[] head; + + /// + /// prev[index & WMASK] points to the previous index that has the + /// same hash code as the string starting at index. This way + /// entries with the same hash code are in a linked list. + /// Note that the array should really be unsigned short, so you need + /// to and the values with 0xffff. + /// + short[] prev; + + int matchStart; + // Length of best match + int matchLen; + // Set if previous match exists + bool prevAvailable; + int blockStart; + + /// + /// Points to the current character in the window. + /// + int strstart; + + /// + /// lookahead is the number of characters starting at strstart in + /// window that are valid. + /// So window[strstart] until window[strstart+lookahead-1] are valid + /// characters. + /// + int lookahead; + + /// + /// This array contains the part of the uncompressed stream that + /// is of relevance. The current character is indexed by strstart. + /// + byte[] window; + + DeflateStrategy strategy; + int max_chain, max_lazy, niceLength, goodLength; + + /// + /// The current compression function. + /// + int compressionFunction; + + /// + /// The input data for compression. + /// + byte[] inputBuf; + + /// + /// The total bytes of input read. + /// + long totalIn; + + /// + /// The offset into inputBuf, where input data starts. + /// + int inputOff; + + /// + /// The end offset of the input data. 
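// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the Plupload sources: how the head[] and
// prev[] fields documented above form per-hash linked lists of window
// positions.  InsertString() stores the newest position in head[hash] and
// links the old value through prev[pos & WMASK]; FindLongestMatch() then
// walks candidates newest-to-oldest until it falls out of the 32K window or
// exhausts its chain budget.  Simplified:
// ---------------------------------------------------------------------------
static class HashChainSketch {
    const int WMASK = (1 << 15) - 1;                 // DeflaterConstants.WMASK

    public static System.Collections.Generic.IEnumerable<int> Candidates(
            short[] head, short[] prev, int hash, int strstart, int maxDist, int maxChain) {
        int limit = System.Math.Max(strstart - maxDist, 0);
        int cur = head[hash] & 0xffff;               // the arrays hold unsigned 16-bit positions
        while (cur > limit && maxChain-- > 0) {
            yield return cur;                        // a position whose first 3 bytes share this hash
            cur = prev[cur & WMASK] & 0xffff;
        }
    }
}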
+ /// + int inputEnd; + + DeflaterPending pending; + DeflaterHuffman huffman; + + /// + /// The adler checksum + /// + Adler32 adler; + #endregion + } +} diff --git a/debian/missing-sources/plupload/csharp/Plupload/PngEncoder/DeflaterHuffman.cs b/debian/missing-sources/plupload/csharp/Plupload/PngEncoder/DeflaterHuffman.cs new file mode 100644 index 0000000..e94cbd5 --- /dev/null +++ b/debian/missing-sources/plupload/csharp/Plupload/PngEncoder/DeflaterHuffman.cs @@ -0,0 +1,881 @@ +// DeflaterHuffman.cs +// +// Copyright (C) 2001 Mike Krueger +// Copyright (C) 2004 John Reilly +// +// This file was translated from java, it was part of the GNU Classpath +// Copyright (C) 2001 Free Software Foundation, Inc. +// +// This program is free software; you can redistribute it and/or +// modify it under the terms of the GNU General Public License +// as published by the Free Software Foundation; either version 2 +// of the License, or (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +// +// Linking this library statically or dynamically with other modules is +// making a combined work based on this library. Thus, the terms and +// conditions of the GNU General Public License cover the whole +// combination. +// +// As a special exception, the copyright holders of this library give you +// permission to link this library with independent modules to produce an +// executable, regardless of the license terms of these independent +// modules, and to copy and distribute the resulting executable under +// terms of your choice, provided that you also meet, for each linked +// independent module, the terms and conditions of the license of that +// module. An independent module is a module which is not derived from +// or based on this library. If you modify this library, you may extend +// this exception to your version of the library, but you are not +// obligated to do so. If you do not wish to do so, delete this +// exception statement from your version. + +using System; + +namespace Plupload.PngEncoder { + + /// + /// This is the DeflaterHuffman class. + /// + /// This class is not thread safe. This is inherent in the API, due + /// to the split of Deflate and SetInput. + /// + /// author of the original java version : Jochen Hoenicke + /// + public class DeflaterHuffman { + const int BUFSIZE = 1 << (DeflaterConstants.DEFAULT_MEM_LEVEL + 6); + const int LITERAL_NUM = 286; + + // Number of distance codes + const int DIST_NUM = 30; + // Number of codes used to transfer bit lengths + const int BITLEN_NUM = 19; + + // repeat previous bit length 3-6 times (2 bits of repeat count) + const int REP_3_6 = 16; + // repeat a zero length 3-10 times (3 bits of repeat count) + const int REP_3_10 = 17; + // repeat a zero length 11-138 times (7 bits of repeat count) + const int REP_11_138 = 18; + + const int EOF_SYMBOL = 256; + + // The lengths of the bit length codes are sent in order of decreasing + // probability, to avoid transmitting the lengths for unused bit length codes. 
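// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the Plupload sources: the run-length
// scheme behind the REP_* symbols above (used by Tree.WriteTree further
// below).  A code length is sent literally once; repeats of a non-zero length
// use REP_3_6 (2 extra bits), runs of zero use REP_3_10 (3 extra bits) or
// REP_11_138 (7 extra bits).  For example {4,4,4,4,4,0,0,0,0,0,0} becomes
// "4", REP_3_6+1, REP_3_10+3.  BL_ORDER, defined next, only fixes the order
// in which the 19 bit-length code lengths are themselves transmitted.
// This simplified encoder uses the same thresholds; edge cases may differ
// slightly from WriteTree.
// ---------------------------------------------------------------------------
static class CodeLengthRleSketch {
    // Returns {symbol, extraBits} pairs; extraBits is -1 for plain lengths.
    public static System.Collections.Generic.List<int[]> Encode(byte[] lengths) {
        var output = new System.Collections.Generic.List<int[]>();
        int i = 0;
        while (i < lengths.Length) {
            int len = lengths[i];
            int run = 1;
            while (i + run < lengths.Length && lengths[i + run] == len) run++;

            if (len != 0) {
                output.Add(new[] { len, -1 });           // the length itself, once
                int rest = run - 1;
                while (rest >= 3) {
                    int n = System.Math.Min(rest, 6);
                    output.Add(new[] { 16, n - 3 });     // REP_3_6
                    rest -= n;
                }
                while (rest-- > 0) output.Add(new[] { len, -1 });
            } else {
                int rest = run;
                while (rest >= 11) {
                    int n = System.Math.Min(rest, 138);
                    output.Add(new[] { 18, n - 11 });    // REP_11_138
                    rest -= n;
                }
                if (rest >= 3) {
                    output.Add(new[] { 17, rest - 3 });  // REP_3_10
                    rest = 0;
                }
                while (rest-- > 0) output.Add(new[] { 0, -1 });
            }
            i += run;
        }
        return output;
    }
}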
+ static readonly int[] BL_ORDER = { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 }; + + static readonly byte[] bit4Reverse = { + 0, + 8, + 4, + 12, + 2, + 10, + 6, + 14, + 1, + 9, + 5, + 13, + 3, + 11, + 7, + 15 + }; + + static short[] staticLCodes; + static byte[] staticLLength; + static short[] staticDCodes; + static byte[] staticDLength; + + class Tree { + #region Instance Fields + public short[] freqs; + + public byte[] length; + + public int minNumCodes; + + public int numCodes; + + short[] codes; + int[] bl_counts; + int maxLength; + DeflaterHuffman dh; + #endregion + + #region Constructors + public Tree(DeflaterHuffman dh, int elems, int minCodes, int maxLength) { + this.dh = dh; + this.minNumCodes = minCodes; + this.maxLength = maxLength; + freqs = new short[elems]; + bl_counts = new int[maxLength]; + } + + #endregion + + /// + /// Resets the internal state of the tree + /// + public void Reset() { + for (int i = 0; i < freqs.Length; i++) { + freqs[i] = 0; + } + codes = null; + length = null; + } + + public void WriteSymbol(int code) { + // if (DeflaterConstants.DEBUGGING) { + // freqs[code]--; + // // Console.Write("writeSymbol("+freqs.length+","+code+"): "); + // } + dh.pending.WriteBits(codes[code] & 0xffff, length[code]); + } + + /// + /// Check that all frequencies are zero + /// + /// + /// At least one frequency is non-zero + /// + public void CheckEmpty() { + bool empty = true; + for (int i = 0; i < freqs.Length; i++) { + if (freqs[i] != 0) { + //Console.WriteLine("freqs[" + i + "] == " + freqs[i]); + empty = false; + } + } + + if (!empty) { + throw new Exception("!Empty"); + } + } + + /// + /// Set static codes and length + /// + /// new codes + /// length for new codes + public void SetStaticCodes(short[] staticCodes, byte[] staticLengths) { + codes = staticCodes; + length = staticLengths; + } + + /// + /// Build dynamic codes and lengths + /// + public void BuildCodes() { + int numSymbols = freqs.Length; + int[] nextCode = new int[maxLength]; + int code = 0; + + codes = new short[freqs.Length]; + + // if (DeflaterConstants.DEBUGGING) { + // //Console.WriteLine("buildCodes: "+freqs.Length); + // } + + for (int bits = 0; bits < maxLength; bits++) { + nextCode[bits] = code; + code += bl_counts[bits] << (15 - bits); + + // if (DeflaterConstants.DEBUGGING) { + // //Console.WriteLine("bits: " + ( bits + 1) + " count: " + bl_counts[bits] + // +" nextCode: "+code); + // } + } + +#if DebugDeflation + if ( DeflaterConstants.DEBUGGING && (code != 65536) ) + { + throw new SharpZipBaseException("Inconsistent bl_counts!"); + } +#endif + for (int i = 0; i < numCodes; i++) { + int bits = length[i]; + if (bits > 0) { + + // if (DeflaterConstants.DEBUGGING) { + // //Console.WriteLine("codes["+i+"] = rev(" + nextCode[bits-1]+"), + // +bits); + // } + + codes[i] = BitReverse(nextCode[bits - 1]); + nextCode[bits - 1] += 1 << (16 - bits); + } + } + } + + public void BuildTree() { + int numSymbols = freqs.Length; + + /* heap is a priority queue, sorted by frequency, least frequent + * nodes first. The heap is a binary tree, with the property, that + * the parent node is smaller than both child nodes. This assures + * that the smallest node is the first parent. + * + * The binary tree is encoded in an array: 0 is root node and + * the nodes 2*n+1, 2*n+2 are the child nodes of node n. 
+ */ + int[] heap = new int[numSymbols]; + int heapLen = 0; + int maxCode = 0; + for (int n = 0; n < numSymbols; n++) { + int freq = freqs[n]; + if (freq != 0) { + // Insert n into heap + int pos = heapLen++; + int ppos; + while (pos > 0 && freqs[heap[ppos = (pos - 1) / 2]] > freq) { + heap[pos] = heap[ppos]; + pos = ppos; + } + heap[pos] = n; + + maxCode = n; + } + } + + /* We could encode a single literal with 0 bits but then we + * don't see the literals. Therefore we force at least two + * literals to avoid this case. We don't care about order in + * this case, both literals get a 1 bit code. + */ + while (heapLen < 2) { + int node = maxCode < 2 ? ++maxCode : 0; + heap[heapLen++] = node; + } + + numCodes = Math.Max(maxCode + 1, minNumCodes); + + int numLeafs = heapLen; + int[] childs = new int[4 * heapLen - 2]; + int[] values = new int[2 * heapLen - 1]; + int numNodes = numLeafs; + for (int i = 0; i < heapLen; i++) { + int node = heap[i]; + childs[2 * i] = node; + childs[2 * i + 1] = -1; + values[i] = freqs[node] << 8; + heap[i] = i; + } + + /* Construct the Huffman tree by repeatedly combining the least two + * frequent nodes. + */ + do { + int first = heap[0]; + int last = heap[--heapLen]; + + // Propagate the hole to the leafs of the heap + int ppos = 0; + int path = 1; + + while (path < heapLen) { + if (path + 1 < heapLen && values[heap[path]] > values[heap[path + 1]]) { + path++; + } + + heap[ppos] = heap[path]; + ppos = path; + path = path * 2 + 1; + } + + /* Now propagate the last element down along path. Normally + * it shouldn't go too deep. + */ + int lastVal = values[last]; + while ((path = ppos) > 0 && values[heap[ppos = (path - 1) / 2]] > lastVal) { + heap[path] = heap[ppos]; + } + heap[path] = last; + + + int second = heap[0]; + + // Create a new node father of first and second + last = numNodes++; + childs[2 * last] = first; + childs[2 * last + 1] = second; + int mindepth = Math.Min(values[first] & 0xff, values[second] & 0xff); + values[last] = lastVal = values[first] + values[second] - mindepth + 1; + + // Again, propagate the hole to the leafs + ppos = 0; + path = 1; + + while (path < heapLen) { + if (path + 1 < heapLen && values[heap[path]] > values[heap[path + 1]]) { + path++; + } + + heap[ppos] = heap[path]; + ppos = path; + path = ppos * 2 + 1; + } + + // Now propagate the new element down along path + while ((path = ppos) > 0 && values[heap[ppos = (path - 1) / 2]] > lastVal) { + heap[path] = heap[ppos]; + } + heap[path] = last; + } while (heapLen > 1); + + if (heap[0] != childs.Length / 2 - 1) { + throw new Exception("Heap invariant violated"); + } + + BuildLength(childs); + } + + /// + /// Get encoded length + /// + /// Encoded length, the sum of frequencies * lengths + public int GetEncodedLength() { + int len = 0; + for (int i = 0; i < freqs.Length; i++) { + len += freqs[i] * length[i]; + } + return len; + } + + /// + /// Scan a literal or distance tree to determine the frequencies of the codes + /// in the bit length tree. 
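The comment above describes BuildTree's array-encoded min-heap: index 0 is the root and the children of node n sit at 2*n+1 and 2*n+2. A minimal standalone C# sketch of that sift-up insertion (method and parameter names are illustrative, not taken from the patch):

    // Insert symbol n into a heap ordered by freqs[], smallest frequency at index 0.
    static void HeapInsert(int[] heap, ref int heapLen, short[] freqs, int n) {
        int pos = heapLen++;
        while (pos > 0) {
            int ppos = (pos - 1) / 2;            // parent index
            if (freqs[heap[ppos]] <= freqs[n])
                break;                           // heap property already holds
            heap[pos] = heap[ppos];              // move the larger parent down
            pos = ppos;
        }
        heap[pos] = n;                           // drop the new symbol into place
    }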
+ /// + public void CalcBLFreq(Tree blTree) { + int max_count; /* max repeat count */ + int min_count; /* min repeat count */ + int count; /* repeat count of the current code */ + int curlen = -1; /* length of current code */ + + int i = 0; + while (i < numCodes) { + count = 1; + int nextlen = length[i]; + if (nextlen == 0) { + max_count = 138; + min_count = 3; + } else { + max_count = 6; + min_count = 3; + if (curlen != nextlen) { + blTree.freqs[nextlen]++; + count = 0; + } + } + curlen = nextlen; + i++; + + while (i < numCodes && curlen == length[i]) { + i++; + if (++count >= max_count) { + break; + } + } + + if (count < min_count) { + blTree.freqs[curlen] += (short) count; + } else if (curlen != 0) { + blTree.freqs[REP_3_6]++; + } else if (count <= 10) { + blTree.freqs[REP_3_10]++; + } else { + blTree.freqs[REP_11_138]++; + } + } + } + + /// + /// Write tree values + /// + /// Tree to write + public void WriteTree(Tree blTree) { + int max_count; // max repeat count + int min_count; // min repeat count + int count; // repeat count of the current code + int curlen = -1; // length of current code + + int i = 0; + while (i < numCodes) { + count = 1; + int nextlen = length[i]; + if (nextlen == 0) { + max_count = 138; + min_count = 3; + } else { + max_count = 6; + min_count = 3; + if (curlen != nextlen) { + blTree.WriteSymbol(nextlen); + count = 0; + } + } + curlen = nextlen; + i++; + + while (i < numCodes && curlen == length[i]) { + i++; + if (++count >= max_count) { + break; + } + } + + if (count < min_count) { + while (count-- > 0) { + blTree.WriteSymbol(curlen); + } + } else if (curlen != 0) { + blTree.WriteSymbol(REP_3_6); + dh.pending.WriteBits(count - 3, 2); + } else if (count <= 10) { + blTree.WriteSymbol(REP_3_10); + dh.pending.WriteBits(count - 3, 3); + } else { + blTree.WriteSymbol(REP_11_138); + dh.pending.WriteBits(count - 11, 7); + } + } + } + + void BuildLength(int[] childs) { + this.length = new byte[freqs.Length]; + int numNodes = childs.Length / 2; + int numLeafs = (numNodes + 1) / 2; + int overflow = 0; + + for (int i = 0; i < maxLength; i++) { + bl_counts[i] = 0; + } + + // First calculate optimal bit lengths + int[] lengths = new int[numNodes]; + lengths[numNodes - 1] = 0; + + for (int i = numNodes - 1; i >= 0; i--) { + if (childs[2 * i + 1] != -1) { + int bitLength = lengths[i] + 1; + if (bitLength > maxLength) { + bitLength = maxLength; + overflow++; + } + lengths[childs[2 * i]] = lengths[childs[2 * i + 1]] = bitLength; + } else { + // A leaf node + int bitLength = lengths[i]; + bl_counts[bitLength - 1]++; + this.length[childs[2 * i]] = (byte) lengths[i]; + } + } + + // if (DeflaterConstants.DEBUGGING) { + // //Console.WriteLine("Tree "+freqs.Length+" lengths:"); + // for (int i=0; i < numLeafs; i++) { + // //Console.WriteLine("Node "+childs[2*i]+" freq: "+freqs[childs[2*i]] + // + " len: "+length[childs[2*i]]); + // } + // } + + if (overflow == 0) { + return; + } + + int incrBitLen = maxLength - 1; + do { + // Find the first bit length which could increase: + while (bl_counts[--incrBitLen] == 0) + ; + + // Move this node one down and remove a corresponding + // number of overflow nodes. + do { + bl_counts[incrBitLen]--; + bl_counts[++incrBitLen]++; + overflow -= 1 << (maxLength - 1 - incrBitLen); + } while (overflow > 0 && incrBitLen < maxLength - 1); + } while (overflow > 0); + + /* We may have overshot above. Move some nodes from maxLength to + * maxLength-1 in that case. 
+ */ + bl_counts[maxLength - 1] += overflow; + bl_counts[maxLength - 2] -= overflow; + + /* Now recompute all bit lengths, scanning in increasing + * frequency. It is simpler to reconstruct all lengths instead of + * fixing only the wrong ones. This idea is taken from 'ar' + * written by Haruhiko Okumura. + * + * The nodes were inserted with decreasing frequency into the childs + * array. + */ + int nodePtr = 2 * numLeafs; + for (int bits = maxLength; bits != 0; bits--) { + int n = bl_counts[bits - 1]; + while (n > 0) { + int childPtr = 2 * childs[nodePtr++]; + if (childs[childPtr + 1] == -1) { + // We found another leaf + length[childs[childPtr]] = (byte) bits; + n--; + } + } + } + // if (DeflaterConstants.DEBUGGING) { + // //Console.WriteLine("*** After overflow elimination. ***"); + // for (int i=0; i < numLeafs; i++) { + // //Console.WriteLine("Node "+childs[2*i]+" freq: "+freqs[childs[2*i]] + // + " len: "+length[childs[2*i]]); + // } + // } + } + + } + + #region Instance Fields + /// + /// Pending buffer to use + /// + public DeflaterPending pending; + + Tree literalTree; + Tree distTree; + Tree blTree; + + // Buffer for distances + short[] d_buf; + byte[] l_buf; + int last_lit; + int extra_bits; + #endregion + + static DeflaterHuffman() { + // See RFC 1951 3.2.6 + // Literal codes + staticLCodes = new short[LITERAL_NUM]; + staticLLength = new byte[LITERAL_NUM]; + + int i = 0; + while (i < 144) { + staticLCodes[i] = BitReverse((0x030 + i) << 8); + staticLLength[i++] = 8; + } + + while (i < 256) { + staticLCodes[i] = BitReverse((0x190 - 144 + i) << 7); + staticLLength[i++] = 9; + } + + while (i < 280) { + staticLCodes[i] = BitReverse((0x000 - 256 + i) << 9); + staticLLength[i++] = 7; + } + + while (i < LITERAL_NUM) { + staticLCodes[i] = BitReverse((0x0c0 - 280 + i) << 8); + staticLLength[i++] = 8; + } + + // Distance codes + staticDCodes = new short[DIST_NUM]; + staticDLength = new byte[DIST_NUM]; + for (i = 0; i < DIST_NUM; i++) { + staticDCodes[i] = BitReverse(i << 11); + staticDLength[i] = 5; + } + } + + /// + /// Construct instance with pending buffer + /// + /// Pending buffer to use + public DeflaterHuffman(DeflaterPending pending) { + this.pending = pending; + + literalTree = new Tree(this, LITERAL_NUM, 257, 15); + distTree = new Tree(this, DIST_NUM, 1, 15); + blTree = new Tree(this, BITLEN_NUM, 4, 7); + + d_buf = new short[BUFSIZE]; + l_buf = new byte[BUFSIZE]; + } + + /// + /// Reset internal state + /// + public void Reset() { + last_lit = 0; + extra_bits = 0; + literalTree.Reset(); + distTree.Reset(); + blTree.Reset(); + } + + /// + /// Write all trees to pending buffer + /// + /// The number/rank of treecodes to send. 
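SendAllTrees (next) emits the RFC 1951 section 3.2.7 dynamic block header: 5 bits for the literal/length code count, 5 bits for the distance code count, 4 bits for the bit-length code count, then 3-bit code lengths in BL_ORDER, and finally the two trees as run-length encoded by WriteTree above. A worked trace of that run-length encoding, given here only as comments (symbol values follow the REP_* constants defined at the top of the class):

    // code-length array:  8 8 8 8 8  0 0 ... 0 (30 zeros)  7
    // WriteTree emits:    literal 8                          first length
    //                     REP_3_6, 2 extra bits = 1          repeat "8" four more times (3 + 1)
    //                     REP_11_138, 7 extra bits = 19      run of 30 zeros (11 + 19)
    //                     literal 7                          final length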
+ public void SendAllTrees(int blTreeCodes) { + blTree.BuildCodes(); + literalTree.BuildCodes(); + distTree.BuildCodes(); + pending.WriteBits(literalTree.numCodes - 257, 5); + pending.WriteBits(distTree.numCodes - 1, 5); + pending.WriteBits(blTreeCodes - 4, 4); + for (int rank = 0; rank < blTreeCodes; rank++) { + pending.WriteBits(blTree.length[BL_ORDER[rank]], 3); + } + literalTree.WriteTree(blTree); + distTree.WriteTree(blTree); + +#if DebugDeflation + if (DeflaterConstants.DEBUGGING) { + blTree.CheckEmpty(); + } +#endif + } + + /// + /// Compress current buffer writing data to pending buffer + /// + public void CompressBlock() { + for (int i = 0; i < last_lit; i++) { + int litlen = l_buf[i] & 0xff; + int dist = d_buf[i]; + if (dist-- != 0) { + // if (DeflaterConstants.DEBUGGING) { + // Console.Write("["+(dist+1)+","+(litlen+3)+"]: "); + // } + + int lc = Lcode(litlen); + literalTree.WriteSymbol(lc); + + int bits = (lc - 261) / 4; + if (bits > 0 && bits <= 5) { + pending.WriteBits(litlen & ((1 << bits) - 1), bits); + } + + int dc = Dcode(dist); + distTree.WriteSymbol(dc); + + bits = dc / 2 - 1; + if (bits > 0) { + pending.WriteBits(dist & ((1 << bits) - 1), bits); + } + } else { + // if (DeflaterConstants.DEBUGGING) { + // if (litlen > 32 && litlen < 127) { + // Console.Write("("+(char)litlen+"): "); + // } else { + // Console.Write("{"+litlen+"}: "); + // } + // } + literalTree.WriteSymbol(litlen); + } + } + +#if DebugDeflation + if (DeflaterConstants.DEBUGGING) { + Console.Write("EOF: "); + } +#endif + literalTree.WriteSymbol(EOF_SYMBOL); + +#if DebugDeflation + if (DeflaterConstants.DEBUGGING) { + literalTree.CheckEmpty(); + distTree.CheckEmpty(); + } +#endif + } + + /// + /// Flush block to output with no compression + /// + /// Data to write + /// Index of first byte to write + /// Count of bytes to write + /// True if this is the last block + public void FlushStoredBlock(byte[] stored, int storedOffset, int storedLength, bool lastBlock) { +#if DebugDeflation + // if (DeflaterConstants.DEBUGGING) { + // //Console.WriteLine("Flushing stored block "+ storedLength); + // } +#endif + pending.WriteBits((DeflaterConstants.STORED_BLOCK << 1) + (lastBlock ? 
1 : 0), 3); + pending.AlignToByte(); + pending.WriteShort(storedLength); + pending.WriteShort(~storedLength); + pending.WriteBlock(stored, storedOffset, storedLength); + Reset(); + } + + /// + /// Flush block to output with compression + /// + /// Data to flush + /// Index of first byte to flush + /// Count of bytes to flush + /// True if this is the last block + public void FlushBlock(byte[] stored, int storedOffset, int storedLength, bool lastBlock) { + literalTree.freqs[EOF_SYMBOL]++; + + // Build trees + literalTree.BuildTree(); + distTree.BuildTree(); + + // Calculate bitlen frequency + literalTree.CalcBLFreq(blTree); + distTree.CalcBLFreq(blTree); + + // Build bitlen tree + blTree.BuildTree(); + + int blTreeCodes = 4; + for (int i = 18; i > blTreeCodes; i--) { + if (blTree.length[BL_ORDER[i]] > 0) { + blTreeCodes = i + 1; + } + } + int opt_len = 14 + blTreeCodes * 3 + blTree.GetEncodedLength() + + literalTree.GetEncodedLength() + distTree.GetEncodedLength() + + extra_bits; + + int static_len = extra_bits; + for (int i = 0; i < LITERAL_NUM; i++) { + static_len += literalTree.freqs[i] * staticLLength[i]; + } + for (int i = 0; i < DIST_NUM; i++) { + static_len += distTree.freqs[i] * staticDLength[i]; + } + if (opt_len >= static_len) { + // Force static trees + opt_len = static_len; + } + + if (storedOffset >= 0 && storedLength + 4 < opt_len >> 3) { + // Store Block + + // if (DeflaterConstants.DEBUGGING) { + // //Console.WriteLine("Storing, since " + storedLength + " < " + opt_len + // + " <= " + static_len); + // } + FlushStoredBlock(stored, storedOffset, storedLength, lastBlock); + } else if (opt_len == static_len) { + // Encode with static tree + pending.WriteBits((DeflaterConstants.STATIC_TREES << 1) + (lastBlock ? 1 : 0), 3); + literalTree.SetStaticCodes(staticLCodes, staticLLength); + distTree.SetStaticCodes(staticDCodes, staticDLength); + CompressBlock(); + Reset(); + } else { + // Encode with dynamic tree + pending.WriteBits((DeflaterConstants.DYN_TREES << 1) + (lastBlock ? 1 : 0), 3); + SendAllTrees(blTreeCodes); + CompressBlock(); + Reset(); + } + } + + /// + /// Get value indicating if internal buffer is full + /// + /// true if buffer is full + public bool IsFull() { + return last_lit >= BUFSIZE; + } + + /// + /// Add literal to buffer + /// + /// Literal value to add to buffer. + /// Value indicating internal buffer is full + public bool TallyLit(int literal) { + // if (DeflaterConstants.DEBUGGING) { + // if (lit > 32 && lit < 127) { + // //Console.WriteLine("("+(char)lit+")"); + // } else { + // //Console.WriteLine("{"+lit+"}"); + // } + // } + d_buf[last_lit] = 0; + l_buf[last_lit++] = (byte) literal; + literalTree.freqs[literal]++; + return IsFull(); + } + + /// + /// Add distance code and length to literal and distance trees + /// + /// Distance code + /// Length + /// Value indicating if internal buffer is full + public bool TallyDist(int distance, int length) { + // if (DeflaterConstants.DEBUGGING) { + // //Console.WriteLine("[" + distance + "," + length + "]"); + // } + + d_buf[last_lit] = (short) distance; + l_buf[last_lit++] = (byte) (length - 3); + + int lc = Lcode(length - 3); + literalTree.freqs[lc]++; + if (lc >= 265 && lc < 285) { + extra_bits += (lc - 261) / 4; + } + + int dc = Dcode(distance - 1); + distTree.freqs[dc]++; + if (dc >= 4) { + extra_bits += dc / 2 - 1; + } + return IsFull(); + } + + + /// + /// Reverse the bits of a 16 bit value. 
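Lcode and Dcode (defined at the bottom of this class) map match lengths and distances onto the RFC 1951 symbol space used by CompressBlock and TallyDist above. A small sketch of the mapping, treating the private helpers as callable purely for illustration:

    int lc = Lcode(10 - 3);              // 264: lengths 3..10 map to codes 257..264, no extra bits
    int dc = Dcode(300 - 1);             // 16:  distances 257..384 map to code 16
    int distExtra = dc / 2 - 1;          // 7 extra distance bits, the amount TallyDist adds
    int lenExtra = (lc - 261) / 4;       // 0 here; only length codes 265..284 carry extra bits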
+ /// + /// Value to reverse bits + /// Value with bits reversed + public static short BitReverse(int toReverse) { + return (short) (bit4Reverse[toReverse & 0xF] << 12 | + bit4Reverse[(toReverse >> 4) & 0xF] << 8 | + bit4Reverse[(toReverse >> 8) & 0xF] << 4 | + bit4Reverse[toReverse >> 12]); + } + + static int Lcode(int length) { + if (length == 255) { + return 285; + } + + int code = 257; + while (length >= 8) { + code += 4; + length >>= 1; + } + return code + length; + } + + static int Dcode(int distance) { + int code = 0; + while (distance >= 4) { + code += 2; + distance >>= 1; + } + return code + distance; + } + } +} diff --git a/debian/missing-sources/plupload/csharp/Plupload/PngEncoder/DeflaterOutputStream.cs b/debian/missing-sources/plupload/csharp/Plupload/PngEncoder/DeflaterOutputStream.cs new file mode 100644 index 0000000..4556487 --- /dev/null +++ b/debian/missing-sources/plupload/csharp/Plupload/PngEncoder/DeflaterOutputStream.cs @@ -0,0 +1,469 @@ +// DeflaterOutputStream.cs +// +// Copyright (C) 2001 Mike Krueger +// +// This file was translated from java, it was part of the GNU Classpath +// Copyright (C) 2001 Free Software Foundation, Inc. +// +// This program is free software; you can redistribute it and/or +// modify it under the terms of the GNU General Public License +// as published by the Free Software Foundation; either version 2 +// of the License, or (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +// +// Linking this library statically or dynamically with other modules is +// making a combined work based on this library. Thus, the terms and +// conditions of the GNU General Public License cover the whole +// combination. +// +// As a special exception, the copyright holders of this library give you +// permission to link this library with independent modules to produce an +// executable, regardless of the license terms of these independent +// modules, and to copy and distribute the resulting executable under +// terms of your choice, provided that you also meet, for each linked +// independent module, the terms and conditions of the license of that +// module. An independent module is a module which is not derived from +// or based on this library. If you modify this library, you may extend +// this exception to your version of the library, but you are not +// obligated to do so. If you do not wish to do so, delete this +// exception statement from your version. + +using System; +using System.IO; + +namespace Plupload.PngEncoder { + /// + /// A special stream deflating or compressing the bytes that are + /// written to it. It uses a Deflater to perform actual deflating.
+ /// Authors of the original java version : Tom Tromey, Jochen Hoenicke + ///
+ public class DeflaterOutputStream : Stream { + #region Constructors + /// + /// Creates a new DeflaterOutputStream with a default Deflater and default buffer size. + /// + /// + /// the output stream where deflated output should be written. + /// + public DeflaterOutputStream(Stream baseOutputStream) + : this(baseOutputStream, new Deflater(), 512) { + } + + /// + /// Creates a new DeflaterOutputStream with the given Deflater and + /// default buffer size. + /// + /// + /// the output stream where deflated output should be written. + /// + /// + /// the underlying deflater. + /// + public DeflaterOutputStream(Stream baseOutputStream, Deflater deflater) + : this(baseOutputStream, deflater, 512) { + } + + /// + /// Creates a new DeflaterOutputStream with the given Deflater and + /// buffer size. + /// + /// + /// The output stream where deflated output is written. + /// + /// + /// The underlying deflater to use + /// + /// + /// The buffer size to use when deflating + /// + /// + /// bufsize is less than or equal to zero. + /// + /// + /// baseOutputStream does not support writing + /// + /// + /// deflater instance is null + /// + public DeflaterOutputStream(Stream baseOutputStream, Deflater deflater, int bufferSize) { + if (baseOutputStream == null) { + throw new ArgumentNullException("baseOutputStream"); + } + + if (baseOutputStream.CanWrite == false) { + throw new ArgumentException("Must support writing", "baseOutputStream"); + } + + if (deflater == null) { + throw new ArgumentNullException("deflater"); + } + + if (bufferSize <= 0) { + throw new ArgumentOutOfRangeException("bufferSize"); + } + + baseOutputStream_ = baseOutputStream; + buffer_ = new byte[bufferSize]; + deflater_ = deflater; + } + #endregion + + #region Public API + /// + /// Finishes the stream by calling finish() on the deflater. + /// + /// + /// Not all input is deflated + /// + public virtual void Finish() { + deflater_.Finish(); + while (!deflater_.IsFinished) { + int len = deflater_.Deflate(buffer_, 0, buffer_.Length); + if (len <= 0) { + break; + } + + if (keys != null) { + EncryptBlock(buffer_, 0, len); + } + + baseOutputStream_.Write(buffer_, 0, len); + } + + if (!deflater_.IsFinished) { + throw new Exception("Can't deflate all input?"); + } + + baseOutputStream_.Flush(); + + + if (keys != null) { + keys = null; + } + + } + + /// + /// Get/set flag indicating ownership of the underlying stream. + /// When the flag is true will close the underlying stream also. + /// + public bool IsStreamOwner { + get { return isStreamOwner_; } + set { isStreamOwner_ = value; } + } + + /// + /// Allows client to determine if an entry can be patched after its added + /// + public bool CanPatchEntries { + get { + return baseOutputStream_.CanSeek; + } + } + + #endregion + + #region Encryption + + string password; + + uint[] keys; + + /// + /// Get/set the password used for encryption. + /// + /// When set to null or if the password is empty no encryption is performed + public string Password { + get { + return password; + } + set { + if ((value != null) && (value.Length == 0)) { + password = null; + } else { + password = value; + } + } + } + + /// + /// Encrypt a block of data + /// + /// + /// Data to encrypt. 
NOTE the original contents of the buffer are lost + /// + /// + /// Offset of first byte in buffer to encrypt + /// + /// + /// Number of bytes in buffer to encrypt + /// + protected void EncryptBlock(byte[] buffer, int offset, int length) { + for (int i = offset; i < offset + length; ++i) { + byte oldbyte = buffer[i]; + buffer[i] ^= EncryptByte(); + UpdateKeys(oldbyte); + } + } + + /// + /// Encrypt a single byte + /// + /// + /// The encrypted value + /// + protected byte EncryptByte() { + uint temp = ((keys[2] & 0xFFFF) | 2); + return (byte) ((temp * (temp ^ 1)) >> 8); + } + + /// + /// Update encryption keys + /// + protected void UpdateKeys(byte ch) { + keys[0] = Crc32.ComputeCrc32(keys[0], ch); + keys[1] = keys[1] + (byte) keys[0]; + keys[1] = keys[1] * 134775813 + 1; + keys[2] = Crc32.ComputeCrc32(keys[2], (byte) (keys[1] >> 24)); + } + + #endregion + + #region Deflation Support + /// + /// Deflates everything in the input buffers. This will call + /// def.deflate() until all bytes from the input buffers + /// are processed. + /// + protected void Deflate() { + while (!deflater_.IsNeedingInput) { + int deflateCount = deflater_.Deflate(buffer_, 0, buffer_.Length); + + if (deflateCount <= 0) { + break; + } + + if (keys != null) { + EncryptBlock(buffer_, 0, deflateCount); + } + + baseOutputStream_.Write(buffer_, 0, deflateCount); + } + + if (!deflater_.IsNeedingInput) { + throw new Exception("DeflaterOutputStream can't deflate all input?"); + } + } + #endregion + + #region Stream Overrides + /// + /// Gets value indicating stream can be read from + /// + public override bool CanRead { + get { + return false; + } + } + + /// + /// Gets a value indicating if seeking is supported for this stream + /// This property always returns false + /// + public override bool CanSeek { + get { + return false; + } + } + + /// + /// Get value indicating if this stream supports writing + /// + public override bool CanWrite { + get { + return baseOutputStream_.CanWrite; + } + } + + /// + /// Get current length of stream + /// + public override long Length { + get { + return baseOutputStream_.Length; + } + } + + /// + /// Gets the current position within the stream. + /// + /// Any attempt to set position + public override long Position { + get { + return baseOutputStream_.Position; + } + set { + throw new NotSupportedException("Position property not supported"); + } + } + + /// + /// Sets the current position of this stream to the given value. Not supported by this class! + /// + /// The offset relative to the to seek. + /// The to seek from. + /// The new position in the stream. + /// Any access + public override long Seek(long offset, SeekOrigin origin) { + throw new NotSupportedException("DeflaterOutputStream Seek not supported"); + } + + /// + /// Sets the length of this stream to the given value. Not supported by this class! + /// + /// The new stream length. + /// Any access + public override void SetLength(long value) { + throw new NotSupportedException("DeflaterOutputStream SetLength not supported"); + } + + /// + /// Read a byte from stream advancing position by one + /// + /// The byte read cast to an int. THe value is -1 if at the end of the stream. + /// Any access + public override int ReadByte() { + throw new NotSupportedException("DeflaterOutputStream ReadByte not supported"); + } + + /// + /// Read a block of bytes from stream + /// + /// The buffer to store read data in. + /// The offset to start storing at. + /// The maximum number of bytes to read. 
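A minimal usage sketch for DeflaterOutputStream, using only members declared in this changeset (the Deflater type comes from Deflater.cs elsewhere in this patch) plus a System.IO.MemoryStream to hold the result:

    var raw = new System.IO.MemoryStream();
    var zout = new DeflaterOutputStream(raw);            // default Deflater, 512 byte buffer
    byte[] data = System.Text.Encoding.UTF8.GetBytes("hello hello hello");
    zout.Write(data, 0, data.Length);                    // SetInput + Deflate on the wrapped deflater
    zout.Close();                                        // Finish()es the deflater, then closes raw
    byte[] compressed = raw.ToArray();                   // MemoryStream.ToArray works after Close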
+ /// The actual number of bytes read. Zero if end of stream is detected. + /// Any access + public override int Read(byte[] buffer, int offset, int count) { + throw new NotSupportedException("DeflaterOutputStream Read not supported"); + } + + /// + /// Asynchronous reads are not supported a NotSupportedException is always thrown + /// + /// The buffer to read into. + /// The offset to start storing data at. + /// The number of bytes to read + /// The async callback to use. + /// The state to use. + /// Returns an + /// Any access + public override IAsyncResult BeginRead(byte[] buffer, int offset, int count, AsyncCallback callback, object state) { + throw new NotSupportedException("DeflaterOutputStream BeginRead not currently supported"); + } + + /// + /// Asynchronous writes arent supported, a NotSupportedException is always thrown + /// + /// The buffer to write. + /// The offset to begin writing at. + /// The number of bytes to write. + /// The to use. + /// The state object. + /// Returns an IAsyncResult. + /// Any access + public override IAsyncResult BeginWrite(byte[] buffer, int offset, int count, AsyncCallback callback, object state) { + throw new NotSupportedException("BeginWrite is not supported"); + } + + /// + /// Flushes the stream by calling Flush on the deflater and then + /// on the underlying stream. This ensures that all bytes are flushed. + /// + public override void Flush() { + deflater_.Flush(); + Deflate(); + baseOutputStream_.Flush(); + } + + /// + /// Calls and closes the underlying + /// stream when is true. + /// + public override void Close() { + if (!isClosed_) { + isClosed_ = true; + + try { + Finish(); + + keys = null; + } finally { + if (isStreamOwner_) { + baseOutputStream_.Close(); + } + } + } + } + + /// + /// Writes a single byte to the compressed output stream. + /// + /// + /// The byte value. + /// + public override void WriteByte(byte value) { + byte[] b = new byte[1]; + b[0] = value; + Write(b, 0, 1); + } + + /// + /// Writes bytes from an array to the compressed stream. + /// + /// + /// The byte array + /// + /// + /// The offset into the byte array where to start. + /// + /// + /// The number of bytes to write. + /// + public override void Write(byte[] buffer, int offset, int count) { + deflater_.SetInput(buffer, offset, count); + Deflate(); + } + #endregion + + #region Instance Fields + /// + /// This buffer is used temporarily to retrieve the bytes from the + /// deflater and write them to the underlying output stream. + /// + byte[] buffer_; + + /// + /// The deflater which is used to deflate the stream. + /// + protected Deflater deflater_; + + /// + /// Base stream the deflater depends on. + /// + protected Stream baseOutputStream_; + + bool isClosed_; + + bool isStreamOwner_ = true; + #endregion + } +} diff --git a/debian/missing-sources/plupload/csharp/Plupload/PngEncoder/DeflaterPending.cs b/debian/missing-sources/plupload/csharp/Plupload/PngEncoder/DeflaterPending.cs new file mode 100644 index 0000000..3d5ea36 --- /dev/null +++ b/debian/missing-sources/plupload/csharp/Plupload/PngEncoder/DeflaterPending.cs @@ -0,0 +1,55 @@ +// DeflaterPending.cs +// +// Copyright (C) 2001 Mike Krueger +// Copyright (C) 2004 John Reilly +// +// This file was translated from java, it was part of the GNU Classpath +// Copyright (C) 2001 Free Software Foundation, Inc. 
+// +// This program is free software; you can redistribute it and/or +// modify it under the terms of the GNU General Public License +// as published by the Free Software Foundation; either version 2 +// of the License, or (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +// +// Linking this library statically or dynamically with other modules is +// making a combined work based on this library. Thus, the terms and +// conditions of the GNU General Public License cover the whole +// combination. +// +// As a special exception, the copyright holders of this library give you +// permission to link this library with independent modules to produce an +// executable, regardless of the license terms of these independent +// modules, and to copy and distribute the resulting executable under +// terms of your choice, provided that you also meet, for each linked +// independent module, the terms and conditions of the license of that +// module. An independent module is a module which is not derived from +// or based on this library. If you modify this library, you may extend +// this exception to your version of the library, but you are not +// obligated to do so. If you do not wish to do so, delete this +// exception statement from your version. + +namespace Plupload.PngEncoder { + + /// + /// This class stores the pending output of the Deflater. + /// + /// author of the original java version : Jochen Hoenicke + /// + public class DeflaterPending : PendingBuffer { + /// + /// Construct instance with default buffer size + /// + public DeflaterPending() + : base(DeflaterConstants.PENDING_BUF_SIZE) { + } + } +} diff --git a/debian/missing-sources/plupload/csharp/Plupload/PngEncoder/IChecksum.cs b/debian/missing-sources/plupload/csharp/Plupload/PngEncoder/IChecksum.cs new file mode 100644 index 0000000..66c5111 --- /dev/null +++ b/debian/missing-sources/plupload/csharp/Plupload/PngEncoder/IChecksum.cs @@ -0,0 +1,90 @@ +// IChecksum.cs - Interface to compute a data checksum +// Copyright (C) 2001 Mike Krueger +// +// This file was translated from java, it was part of the GNU Classpath +// Copyright (C) 1999, 2000, 2001 Free Software Foundation, Inc. +// +// This program is free software; you can redistribute it and/or +// modify it under the terms of the GNU General Public License +// as published by the Free Software Foundation; either version 2 +// of the License, or (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +// +// Linking this library statically or dynamically with other modules is +// making a combined work based on this library. 
Thus, the terms and +// conditions of the GNU General Public License cover the whole +// combination. +// +// As a special exception, the copyright holders of this library give you +// permission to link this library with independent modules to produce an +// executable, regardless of the license terms of these independent +// modules, and to copy and distribute the resulting executable under +// terms of your choice, provided that you also meet, for each linked +// independent module, the terms and conditions of the license of that +// module. An independent module is a module which is not derived from +// or based on this library. If you modify this library, you may extend +// this exception to your version of the library, but you are not +// obligated to do so. If you do not wish to do so, delete this +// exception statement from your version. + +namespace Plupload.PngEncoder { + + /// + /// Interface to compute a data checksum used by checked input/output streams. + /// A data checksum can be updated by one byte or with a byte array. After each + /// update the value of the current checksum can be returned by calling + /// getValue. The complete checksum object can also be reset + /// so it can be used again with new data. + /// + public interface IChecksum { + /// + /// Returns the data checksum computed so far. + /// + long Value { + get; + } + + /// + /// Resets the data checksum as if no update was ever called. + /// + void Reset(); + + /// + /// Adds one byte to the data checksum. + /// + /// + /// the data value to add. The high byte of the int is ignored. + /// + void Update(int value); + + /// + /// Updates the data checksum with the bytes taken from the array. + /// + /// + /// buffer an array of bytes + /// + void Update(byte[] buffer); + + /// + /// Adds the byte array to the data checksum. + /// + /// + /// The buffer which contains the data + /// + /// + /// The offset in the buffer where the data starts + /// + /// + /// the number of data bytes to add. + /// + void Update(byte[] buffer, int offset, int count); + } +} diff --git a/debian/missing-sources/plupload/csharp/Plupload/PngEncoder/PendingBuffer.cs b/debian/missing-sources/plupload/csharp/Plupload/PngEncoder/PendingBuffer.cs new file mode 100644 index 0000000..d1ef02d --- /dev/null +++ b/debian/missing-sources/plupload/csharp/Plupload/PngEncoder/PendingBuffer.cs @@ -0,0 +1,281 @@ +// PendingBuffer.cs +// +// Copyright (C) 2001 Mike Krueger +// Copyright (C) 2004 John Reilly +// +// This file was translated from java, it was part of the GNU Classpath +// Copyright (C) 2001 Free Software Foundation, Inc. +// +// This program is free software; you can redistribute it and/or +// modify it under the terms of the GNU General Public License +// as published by the Free Software Foundation; either version 2 +// of the License, or (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +// +// Linking this library statically or dynamically with other modules is +// making a combined work based on this library. 
Thus, the terms and +// conditions of the GNU General Public License cover the whole +// combination. +// +// As a special exception, the copyright holders of this library give you +// permission to link this library with independent modules to produce an +// executable, regardless of the license terms of these independent +// modules, and to copy and distribute the resulting executable under +// terms of your choice, provided that you also meet, for each linked +// independent module, the terms and conditions of the license of that +// module. An independent module is a module which is not derived from +// or based on this library. If you modify this library, you may extend +// this exception to your version of the library, but you are not +// obligated to do so. If you do not wish to do so, delete this +// exception statement from your version. + +using System; + +namespace Plupload.PngEncoder { + + /// + /// This class is general purpose class for writing data to a buffer. + /// + /// It allows you to write bits as well as bytes + /// Based on DeflaterPending.java + /// + /// author of the original java version : Jochen Hoenicke + /// + public class PendingBuffer { + #region Instance Fields + /// + /// Internal work buffer + /// + byte[] buffer_; + + int start; + int end; + + uint bits; + int bitCount; + #endregion + + #region Constructors + /// + /// construct instance using default buffer size of 4096 + /// + public PendingBuffer() + : this(4096) { + } + + /// + /// construct instance using specified buffer size + /// + /// + /// size to use for internal buffer + /// + public PendingBuffer(int bufferSize) { + buffer_ = new byte[bufferSize]; + } + + #endregion + + /// + /// Clear internal state/buffers + /// + public void Reset() { + start = end = bitCount = 0; + } + + /// + /// Write a byte to buffer + /// + /// + /// The value to write + /// + public void WriteByte(int value) { +#if DebugDeflation + if (DeflaterConstants.DEBUGGING && (start != 0) ) + { + throw new SharpZipBaseException("Debug check: start != 0"); + } +#endif + buffer_[end++] = unchecked((byte) value); + } + + /// + /// Write a short value to buffer LSB first + /// + /// + /// The value to write. + /// + public void WriteShort(int value) { +#if DebugDeflation + if (DeflaterConstants.DEBUGGING && (start != 0) ) + { + throw new SharpZipBaseException("Debug check: start != 0"); + } +#endif + buffer_[end++] = unchecked((byte) value); + buffer_[end++] = unchecked((byte) (value >> 8)); + } + + /// + /// write an integer LSB first + /// + /// The value to write. 
+ public void WriteInt(int value) { +#if DebugDeflation + if (DeflaterConstants.DEBUGGING && (start != 0) ) + { + throw new SharpZipBaseException("Debug check: start != 0"); + } +#endif + buffer_[end++] = unchecked((byte) value); + buffer_[end++] = unchecked((byte) (value >> 8)); + buffer_[end++] = unchecked((byte) (value >> 16)); + buffer_[end++] = unchecked((byte) (value >> 24)); + } + + /// + /// Write a block of data to buffer + /// + /// data to write + /// offset of first byte to write + /// number of bytes to write + public void WriteBlock(byte[] block, int offset, int length) { +#if DebugDeflation + if (DeflaterConstants.DEBUGGING && (start != 0) ) + { + throw new SharpZipBaseException("Debug check: start != 0"); + } +#endif + System.Array.Copy(block, offset, buffer_, end, length); + end += length; + } + + /// + /// The number of bits written to the buffer + /// + public int BitCount { + get { + return bitCount; + } + } + + /// + /// Align internal buffer on a byte boundary + /// + public void AlignToByte() { +#if DebugDeflation + if (DeflaterConstants.DEBUGGING && (start != 0) ) + { + throw new SharpZipBaseException("Debug check: start != 0"); + } +#endif + if (bitCount > 0) { + buffer_[end++] = unchecked((byte) bits); + if (bitCount > 8) { + buffer_[end++] = unchecked((byte) (bits >> 8)); + } + } + bits = 0; + bitCount = 0; + } + + /// + /// Write bits to internal buffer + /// + /// source of bits + /// number of bits to write + public void WriteBits(int b, int count) { +#if DebugDeflation + if (DeflaterConstants.DEBUGGING && (start != 0) ) + { + throw new SharpZipBaseException("Debug check: start != 0"); + } + + // if (DeflaterConstants.DEBUGGING) { + // //Console.WriteLine("writeBits("+b+","+count+")"); + // } +#endif + bits |= (uint) (b << bitCount); + bitCount += count; + if (bitCount >= 16) { + buffer_[end++] = unchecked((byte) bits); + buffer_[end++] = unchecked((byte) (bits >> 8)); + bits >>= 16; + bitCount -= 16; + } + } + + /// + /// Write a short value to internal buffer most significant byte first + /// + /// value to write + public void WriteShortMSB(int s) { +#if DebugDeflation + if (DeflaterConstants.DEBUGGING && (start != 0) ) + { + throw new SharpZipBaseException("Debug check: start != 0"); + } +#endif + buffer_[end++] = unchecked((byte) (s >> 8)); + buffer_[end++] = unchecked((byte) s); + } + + /// + /// Indicates if buffer has been flushed + /// + public bool IsFlushed { + get { + return end == 0; + } + } + + /// + /// Flushes the pending buffer into the given output array. If the + /// output array is to small, only a partial flush is done. + /// + /// The output array. + /// The offset into output array. + /// The maximum number of bytes to store. + /// The number of bytes flushed. + public int Flush(byte[] output, int offset, int length) { + if (bitCount >= 8) { + buffer_[end++] = unchecked((byte) bits); + bits >>= 8; + bitCount -= 8; + } + + if (length > end - start) { + length = end - start; + System.Array.Copy(buffer_, start, output, offset, length); + start = 0; + end = 0; + } else { + System.Array.Copy(buffer_, start, output, offset, length); + start += length; + } + return length; + } + + /// + /// Convert internal buffer to byte array. + /// Buffer is empty on completion + /// + /// + /// The internal buffer contents converted to a byte array. 
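WriteBits above packs values least-significant-bit first, so the first value written lands in the low-order bits of the output byte. A short worked sketch using only public members of PendingBuffer (the concrete values are illustrative):

    var pb = new PendingBuffer(16);
    pb.WriteBits(5, 3);                  // binary 101 goes into bits 0..2
    pb.WriteBits(26, 5);                 // binary 11010 goes into bits 3..7
    pb.AlignToByte();                    // flush the partially filled byte
    byte[] packed = pb.ToByteArray();    // packed[0] == 0xD5, i.e. 1101 0101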
+ /// + public byte[] ToByteArray() { + byte[] result = new byte[end - start]; + System.Array.Copy(buffer_, start, result, 0, result.Length); + start = 0; + end = 0; + return result; + } + } +} diff --git a/debian/missing-sources/plupload/csharp/Plupload/PngEncoder/PngEncoder.cs b/debian/missing-sources/plupload/csharp/Plupload/PngEncoder/PngEncoder.cs new file mode 100644 index 0000000..7ef7366 --- /dev/null +++ b/debian/missing-sources/plupload/csharp/Plupload/PngEncoder/PngEncoder.cs @@ -0,0 +1,467 @@ +/** + * PngEncoder takes a pixel data byte array and creates a byte string which can be saved as a PNG file. + * + *

Thanks to Jay Denny at KeyPoint Software + * http://www.keypoint.com/ + * who let me develop this code on company time. + * + * You may contact me with (probably very-much-needed) improvements, + * comments, and bug fixes at: + * + * david@catcode.com + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * A copy of the GNU LGPL may be found at + * http://www.gnu.org/copyleft/lesser.html
+ * + * @author J. David Eisenberg + * @version 1.5, 19 Oct 2003 + * + * CHANGES: + * -------- + * 19-Nov-2002 : CODING STYLE CHANGES ONLY (by David Gilbert for Object Refinery Limited); + * 19-Sep-2003 : Fix for platforms using EBCDIC (contributed by Paulo Soares); + * 19-Oct-2003 : Change private fields to protected fields so that + * PngEncoderB can inherit them (JDE) + * Fixed bug with calculation of nRows + * 2009-12-22 : Ported Java version over to C#. + */ + +using System; +using System.IO; + +namespace Plupload.PngEncoder { + public class PngEncoder { + /** Constant specifying that alpha channel should be encoded. */ + public const bool ENCODE_ALPHA = true; + + /** Constant specifying that alpha channel should not be encoded. */ + public const bool NO_ALPHA = false; + + /** Constants for filter (NONE) */ + public const int FILTER_NONE = 0; + + /** Constants for filter (SUB) */ + public const int FILTER_SUB = 1; + + /** Constants for filter (UP) */ + public const int FILTER_UP = 2; + + /** Constants for filter (LAST) */ + public const int FILTER_LAST = 2; + + /** IHDR tag. */ + protected static byte[] IHDR = new byte[] { 73, 72, 68, 82 }; + + /** IDAT tag. */ + protected static byte[] IDAT = new byte[] { 73, 68, 65, 84 }; + + /** IEND tag. */ + protected static byte[] IEND = new byte[] { 73, 69, 78, 68 }; + + /** The png bytes. */ + protected byte[] pngBytes; + + /** The prior row. */ + protected byte[] priorRow; + + /** The left bytes. */ + protected byte[] leftBytes; + + /** The width. */ + protected int width, height; + + /** The byte position. */ + protected int bytePos, maxPos; + + /** CRC. */ + protected Crc32 crc = new Crc32(); + + /** The CRC value. */ + protected long crcValue; + + /** Encode alpha? */ + protected bool encodeAlpha; + + /** The filter type. */ + protected int filter; + + /** The bytes-per-pixel. */ + protected int bytesPerPixel; + + /** The compression level. */ + protected int compressionLevel; + + /** PixelData array to encode */ + protected int[] pixelData; + + /** + * Class constructor specifying Image source to encode, whether to encode alpha, filter to use, + * and compression level. + * + * @param pixel_data A Java Image object + * @param encodeAlpha Encode the alpha channel? false=no; true=yes + * @param whichFilter 0=none, 1=sub, 2=up + * @param compLevel 0..9 + * @see java.awt.Image + */ + public PngEncoder(int[] pixel_data, int width, int height, bool encodeAlpha, int whichFilter, int compLevel) { + this.pixelData = pixel_data; + this.width = width; + this.height = height; + this.encodeAlpha = encodeAlpha; + + this.filter = FILTER_NONE; + if (whichFilter <= FILTER_LAST) { + this.filter = whichFilter; + } + + if (compLevel >= 0 && compLevel <= 9) { + this.compressionLevel = compLevel; + } + } + + /** + * Creates an array of bytes that is the PNG equivalent of the current image, specifying + * whether to encode alpha or not. 
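A usage sketch for the encoder as declared below; width, height and the pixel array are illustrative assumptions, and pixels are packed 0xAARRGGBB to match how WriteImageData extracts the channels:

    int width = 64, height = 64;
    int[] argb = new int[width * height];                 // fill with 0xAARRGGBB values
    var enc = new PngEncoder(argb, width, height,
                             PngEncoder.ENCODE_ALPHA,     // emit an RGBA image (color type 6)
                             PngEncoder.FILTER_NONE,      // no per-row filtering
                             6);                          // deflate compression level 0..9
    byte[] png = enc.pngEncode();                         // null if encoding failed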
+ * + * @param encodeAlpha boolean false=no alpha, true=encode alpha + * @return an array of bytes, or null if there was a problem + */ + public byte[] Encode(bool encodeAlpha) { + byte[] pngIdBytes = { 0x89, 0x50, 0x4E, 0x47, 0x0D, 0x0A, 0x1A, 0x0A }; + + /* + * start with an array that is big enough to hold all the pixels + * (plus filter bytes), and an extra 200 bytes for header info + */ + pngBytes = new byte[((width + 1) * height * 3) + 200]; + + /* + * keep track of largest byte written to the array + */ + maxPos = 0; + + bytePos = WriteBytes(pngIdBytes, 0); + //hdrPos = bytePos; + writeHeader(); + //dataPos = bytePos; + if (WriteImageData()) { + writeEnd(); + pngBytes = ResizeByteArray(pngBytes, maxPos); + } else { + pngBytes = null; + } + return pngBytes; + } + + /** + * Creates an array of bytes that is the PNG equivalent of the current image. + * Alpha encoding is determined by its setting in the constructor. + * + * @return an array of bytes, or null if there was a problem + */ + public byte[] pngEncode() { + return Encode(encodeAlpha); + } + + /** + * Increase or decrease the length of a byte array. + * + * @param array The original array. + * @param newLength The length you wish the new array to have. + * @return Array of newly desired length. If shorter than the + * original, the trailing elements are truncated. + */ + protected byte[] ResizeByteArray(byte[] array, int newLength) { + byte[] newArray = new byte[newLength]; + int oldLength = array.Length; + + Array.Copy(array, 0, newArray, 0, Math.Min(oldLength, newLength)); + return newArray; + } + + /** + * Write an array of bytes into the pngBytes array. + * Note: This routine has the side effect of updating + * maxPos, the largest element written in the array. + * The array is resized by 1000 bytes or the length + * of the data to be written, whichever is larger. + * + * @param data The data to be written into pngBytes. + * @param offset The starting point to write to. + * @return The next place to be written to in the pngBytes array. + */ + protected int WriteBytes(byte[] data, int offset) { + maxPos = Math.Max(maxPos, offset + data.Length); + if (data.Length + offset > pngBytes.Length) + pngBytes = ResizeByteArray(pngBytes, pngBytes.Length + Math.Max(1000, data.Length)); + + Array.Copy(data, 0, pngBytes, offset, data.Length); + return offset + data.Length; + } + + /** + * Write an array of bytes into the pngBytes array, specifying number of bytes to write. + * Note: This routine has the side effect of updating + * maxPos, the largest element written in the array. + * The array is resized by 1000 bytes or the length + * of the data to be written, whichever is larger. + * + * @param data The data to be written into pngBytes. + * @param nBytes The number of bytes to be written. + * @param offset The starting point to write to. + * @return The next place to be written to in the pngBytes array. + */ + protected int WriteBytes(byte[] data, int nBytes, int offset) { + maxPos = Math.Max(maxPos, offset + nBytes); + if (nBytes + offset > pngBytes.Length) + pngBytes = ResizeByteArray(pngBytes, pngBytes.Length + Math.Max(1000, nBytes)); + + Array.Copy(data, 0, pngBytes, offset, nBytes); + return offset + nBytes; + } + + /** + * Write a two-byte integer into the pngBytes array at a given position. + * + * @param n The integer to be written into pngBytes. + * @param offset The starting point to write to. + * @return The next place to be written to in the pngBytes array. 
+ */ + protected int WriteInt2(int n, int offset) { + byte[] temp = { (byte) ((n >> 8) & 0xff), (byte) (n & 0xff) }; + + return WriteBytes(temp, offset); + } + + /** + * Write a four-byte integer into the pngBytes array at a given position. + * + * @param n The integer to be written into pngBytes. + * @param offset The starting point to write to. + * @return The next place to be written to in the pngBytes array. + */ + protected int WriteInt4(int n, int offset) { + byte[] temp = {(byte) ((n >> 24) & 0xff), + (byte) ((n >> 16) & 0xff), + (byte) ((n >> 8) & 0xff), + (byte) (n & 0xff)}; + + return WriteBytes(temp, offset); + } + + /** + * Write a single byte into the pngBytes array at a given position. + * + * @param b The integer to be written into pngBytes. + * @param offset The starting point to write to. + * @return The next place to be written to in the pngBytes array. + */ + protected int WriteByte(int b, int offset) { + byte[] temp = { (byte) b }; + + return WriteBytes(temp, offset); + } + + /** + * Write a PNG "IHDR" chunk into the pngBytes array. + */ + protected void writeHeader() { + int startPos; + + startPos = bytePos = WriteInt4(13, bytePos); + + bytePos = WriteBytes(IHDR, bytePos); + bytePos = WriteInt4(width, bytePos); + bytePos = WriteInt4(height, bytePos); + bytePos = WriteByte(8, bytePos); // bit depth + bytePos = WriteByte((encodeAlpha) ? 6 : 2, bytePos); // direct model + bytePos = WriteByte(0, bytePos); // compression method + bytePos = WriteByte(0, bytePos); // filter method + bytePos = WriteByte(0, bytePos); // no interlace + + crc.Reset(); + crc.Update(pngBytes, startPos, bytePos - startPos); + crcValue = crc.Value; + + bytePos = WriteInt4((int) crcValue, bytePos); + } + + /** + * Perform "sub" filtering on the given row. + * Uses temporary array leftBytes to store the original values + * of the previous pixels. The array is 16 bytes long, which + * will easily hold two-byte samples plus two-byte alpha. + * + * @param pixels The array holding the scan lines being built + * @param startPos Starting position within pixels of bytes to be filtered. + * @param width Width of a scanline in pixels. + */ + protected void FilterSub(byte[] pixels, int startPos, int width) { + int i; + int offset = bytesPerPixel; + int actualStart = startPos + offset; + int nBytes = width * bytesPerPixel; + int leftInsert = offset; + int leftExtract = 0; + + for (i = actualStart; i < startPos + nBytes; i++) { + leftBytes[leftInsert] = pixels[i]; + pixels[i] = (byte) ((pixels[i] - leftBytes[leftExtract]) % 256); + leftInsert = (leftInsert + 1) % 0x0f; + leftExtract = (leftExtract + 1) % 0x0f; + } + } + + /** + * Perform "up" filtering on the given row. + * Side effect: refills the prior row with current row + * + * @param pixels The array holding the scan lines being built + * @param startPos Starting position within pixels of bytes to be filtered. + * @param width Width of a scanline in pixels. + */ + protected void FilterUp(byte[] pixels, int startPos, int width) { + int i, nBytes; + byte currentByte; + + nBytes = width * bytesPerPixel; + + for (i = 0; i < nBytes; i++) { + currentByte = pixels[startPos + i]; + pixels[startPos + i] = (byte) ((pixels[startPos + i] - priorRow[i]) % 256); + priorRow[i] = currentByte; + } + } + + /** + * Write the image data into the pngBytes array. + * This will write one or more PNG "IDAT" chunks. In order + * to conserve memory, this method grabs as many rows as will + * fit into 32K bytes, or the whole image; whichever is less. 
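WriteImageData (below) assembles scanlines, one filter-type byte followed by width * bytesPerPixel sample bytes, deflates them, and frames the result exactly like writeHeader frames the IHDR chunk. A condensed restatement of that chunk framing; dataLength and chunkData are placeholder names, not identifiers from the patch:

    // One PNG chunk: 4-byte big-endian length, 4-byte type, data, then CRC over type + data.
    bytePos = WriteInt4(dataLength, bytePos);             // length of the data field only
    bytePos = WriteBytes(IDAT, bytePos);                  // chunk type tag
    crc.Reset();
    crc.Update(IDAT);                                     // CRC covers the type...
    bytePos = WriteBytes(chunkData, dataLength, bytePos);
    crc.Update(chunkData, 0, dataLength);                 // ...and the data, never the length
    bytePos = WriteInt4((int) crc.Value, bytePos);        // 4-byte CRC closes the chunk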
+ * + * + * @return true if no errors; false if error grabbing pixels + */ + protected bool WriteImageData() { + int rowsLeft = height; // number of rows remaining to write + int startRow = 0; // starting row to process this time through + int nRows; // how many rows to grab at a time + + byte[] scanLines; // the scan lines to be compressed + int scanPos; // where we are in the scan lines + int startPos; // where this line's actual pixels start (used for filtering) + + byte[] compressedLines; // the resultant compressed lines + int nCompressed; // how big is the compressed area? + + //int depth; // color depth ( handle only 8 or 32 ) + + bytesPerPixel = (encodeAlpha) ? 4 : 3; + + Deflater scrunch = new Deflater(compressionLevel); + MemoryStream outBytes = new MemoryStream(1024); + + DeflaterOutputStream compBytes = new DeflaterOutputStream(outBytes, scrunch); + try { + while (rowsLeft > 0) { + nRows = Math.Min(32767 / (width * (bytesPerPixel + 1)), rowsLeft); + nRows = Math.Max(nRows, 1); + + int[] pixels = new int[width * nRows]; + Array.Copy(this.pixelData, width * startRow, pixels, 0, width * nRows); + + /* + * Create a data chunk. scanLines adds "nRows" for + * the filter bytes. + */ + scanLines = new byte[width * nRows * bytesPerPixel + nRows]; + + if (filter == FILTER_SUB) { + leftBytes = new byte[16]; + } + if (filter == FILTER_UP) { + priorRow = new byte[width * bytesPerPixel]; + } + + scanPos = 0; + startPos = 1; + for (int i = 0; i < width * nRows; i++) { + if (i % width == 0) { + scanLines[scanPos++] = (byte) filter; + startPos = scanPos; + } + scanLines[scanPos++] = (byte) ((pixels[i] >> 16) & 0xff); + scanLines[scanPos++] = (byte) ((pixels[i] >> 8) & 0xff); + scanLines[scanPos++] = (byte) ((pixels[i]) & 0xff); + if (encodeAlpha) { + scanLines[scanPos++] = (byte) ((pixels[i] >> 24) & 0xff); + } + if ((i % width == width - 1) && (filter != FILTER_NONE)) { + if (filter == FILTER_SUB) { + FilterSub(scanLines, startPos, width); + } + if (filter == FILTER_UP) { + FilterUp(scanLines, startPos, width); + } + } + } + + /* + * Write these lines to the output area + */ + compBytes.Write(scanLines, 0, scanPos); + + startRow += nRows; + rowsLeft -= nRows; + } + compBytes.Close(); + + /* + * Write the compressed bytes + */ + compressedLines = outBytes.ToArray(); + nCompressed = compressedLines.Length; + + crc.Reset(); + bytePos = WriteInt4(nCompressed, bytePos); + bytePos = WriteBytes(IDAT, bytePos); + crc.Update(IDAT); + bytePos = WriteBytes(compressedLines, nCompressed, bytePos); + crc.Update(compressedLines, 0, nCompressed); + + crcValue = crc.Value; + bytePos = WriteInt4((int) crcValue, bytePos); + scrunch.Finish(); + return true; + } catch { + return false; + } + } + + /** + * Write a PNG "IEND" chunk into the pngBytes array. 
+ */ + protected void writeEnd() { + bytePos = WriteInt4(0, bytePos); + bytePos = WriteBytes(IEND, bytePos); + crc.Reset(); + crc.Update(IEND); + crcValue = crc.Value; + bytePos = WriteInt4((int) crcValue, bytePos); + } + } +} \ No newline at end of file diff --git a/debian/missing-sources/plupload/csharp/Plupload/Properties/AppManifest.xml b/debian/missing-sources/plupload/csharp/Plupload/Properties/AppManifest.xml new file mode 100644 index 0000000..1b45a1d --- /dev/null +++ b/debian/missing-sources/plupload/csharp/Plupload/Properties/AppManifest.xml @@ -0,0 +1,6 @@ + + + + \ No newline at end of file diff --git a/debian/missing-sources/plupload/csharp/Plupload/Properties/AssemblyInfo.cs b/debian/missing-sources/plupload/csharp/Plupload/Properties/AssemblyInfo.cs new file mode 100644 index 0000000..005e313 --- /dev/null +++ b/debian/missing-sources/plupload/csharp/Plupload/Properties/AssemblyInfo.cs @@ -0,0 +1,45 @@ +/** + * $Id: AssemblyInfo.cs 480 2008-10-20 15:37:42Z spocke $ + * + * @package MCManagerCore + * @author Moxiecode + * @copyright Copyright © 2007, Moxiecode Systems AB, All rights reserved. + */ + +using System.Reflection; +using System.Runtime.CompilerServices; +using System.Runtime.InteropServices; + +// General Information about an assembly is controlled through the following +// set of attributes. Change these attribute values to modify the information +// associated with an assembly. +[assembly: AssemblyTitle("Plupload")] +[assembly: AssemblyDescription("Multiple upload component.")] +[assembly: AssemblyConfiguration("")] +[assembly: AssemblyCompany("Moxiecode Systems AB")] +[assembly: AssemblyProduct("Upload")] +[assembly: AssemblyCopyright("Copyright © 2009")] +[assembly: AssemblyTrademark("")] +[assembly: AssemblyCulture("")] + +// Setting ComVisible to false makes the types in this assembly not visible +// to COM components. If you need to access a type in this assembly from +// COM, set the ComVisible attribute to true on that type. +[assembly: ComVisible(false)] + +// The following GUID is for the ID of the typelib if this project is exposed to COM +[assembly: Guid("95f0dee8-de7a-46c5-9dcc-0570b0fc4643")] + +// Version information for an assembly consists of the following four values: +// +// Major Version +// Minor Version +// Build Number +// Revision +// +// You can specify all the values or you can default the Revision and Build Numbers +// by using the '*' as shown below: +// [assembly: AssemblyVersion("1.0.*")] + +[assembly: AssemblyVersion("1.0.0.0")] +[assembly: AssemblyFileVersion("1.0.0.0")] diff --git a/debian/missing-sources/plupload/csharp/Plupload/Utils/JsonReader.cs b/debian/missing-sources/plupload/csharp/Plupload/Utils/JsonReader.cs new file mode 100644 index 0000000..659c4fe --- /dev/null +++ b/debian/missing-sources/plupload/csharp/Plupload/Utils/JsonReader.cs @@ -0,0 +1,486 @@ +/* + * $Id: JSONReader.cs 9 2007-05-27 10:47:07Z spocke $ + * + * Copyright © 2007, Moxiecode Systems AB, All rights reserved. 
+ */
+
+using System;
+using System.IO;
+using System.Text;
+using System.Collections;
+using System.Collections.Generic;
+
+namespace Moxiecode.Plupload.Utils {
+    class Stack {
+        private List<object> items;
+
+        public Stack() {
+            items = new List<object>();
+        }
+
+        public void Push(object item) {
+            items.Add(item);
+        }
+
+        public object Pop() {
+            object item = items[items.Count - 1];
+
+            items.RemoveAt(items.Count - 1);
+
+            return item;
+        }
+    }
+
+    ///
+    ///
+    ///
+    public enum JsonLocation {
+        ///
+        InArray,
+
+        ///
+        InObject,
+
+        ///
+        Normal
+    }
+
+    ///
+    ///
+    ///
+    public enum JsonToken {
+        ///
+        Boolean,
+
+        ///
+        Integer,
+
+        ///
+        String,
+
+        ///
+        Null,
+
+        ///
+        Float,
+
+        ///
+        StartArray,
+
+        ///
+        EndArray,
+
+        ///
+        PropertyName,
+
+        ///
+        StartObject,
+
+        ///
+        EndObject
+    }
+
+    ///
+    /// Description of JSONReader.
+    ///
+    public class JsonReader {
+        private TextReader reader;
+        private JsonToken token;
+        private object val;
+        private JsonLocation location;
+        private Stack lastLocations;
+        private bool needProp;
+
+        ///
+        ///
+        ///
+        ///
+        public JsonReader(TextReader reader) {
+            this.reader = reader;
+            this.val = null;
+            this.token = JsonToken.Null;
+            this.location = JsonLocation.Normal;
+            this.lastLocations = new Stack();
+            this.needProp = false;
+        }
+
+        public static object ParseJson(String json) {
+            JsonReader reader = new JsonReader(new StringReader(json));
+
+            return reader.ReadValue();
+        }
+
+        ///
+        ///
+        ///
+        public JsonLocation Location {
+            get { return location; }
+        }
+
+        ///
+        ///
+        ///
+        public JsonToken TokenType {
+            get {
+                return this.token;
+            }
+        }
+
+        ///
+        ///
+        ///
+        public object Value {
+            get {
+                return this.val;
+            }
+        }
+
+        ///
+        ///
+        ///
+        ///
+        public bool Read() {
+            int chr = this.reader.Read();
+
+            if (chr != -1) {
+                switch ((char) chr) {
+                    case '[':
+                        this.lastLocations.Push(this.location);
+                        this.location = JsonLocation.InArray;
+                        this.token = JsonToken.StartArray;
+                        this.val = null;
+                        this.ReadAway();
+                        return true;
+
+                    case ']':
+                        this.location = (JsonLocation)this.lastLocations.Pop();
+                        this.token = JsonToken.EndArray;
+                        this.val = null;
+                        this.ReadAway();
+
+                        if (this.location == JsonLocation.InObject)
+                            this.needProp = true;
+
+                        return true;
+
+                    case '{':
+                        this.lastLocations.Push(this.location);
+                        this.location = JsonLocation.InObject;
+                        this.needProp = true;
+                        this.token = JsonToken.StartObject;
+                        this.val = null;
+                        this.ReadAway();
+                        return true;
+
+                    case '}':
+                        this.location = (JsonLocation) this.lastLocations.Pop();
+                        this.token = JsonToken.EndObject;
+                        this.val = null;
+                        this.ReadAway();
+
+                        if (this.location == JsonLocation.InObject)
+                            this.needProp = true;
+
+                        return true;
+
+                    // String
+                    case '"':
+                    case '\'':
+                        return this.ReadString((char) chr);
+
+                    // Null
+                    case 'n':
+                        return this.ReadNull();
+
+                    // Bool
+                    case 't':
+                    case 'f':
+                        return this.ReadBool((char) chr);
+
+                    default:
+                        // Is number
+                        if (Char.IsNumber((char) chr) || (char) chr == '-' || (char) chr == '.')
+                            return this.ReadNumber((char) chr);
+
+                        return true;
+                }
+            }
+
+            return false;
+        }
+
+        ///
+        ///
+        ///
+        ///
+        public override string ToString() {
+            switch (this.token) {
+                case JsonToken.Boolean:
+                    return "[Boolean] = " + ((bool) this.Value ? "true" : "false");
"true" : "false"); + + case JsonToken.EndArray: + return "[EndArray]"; + + case JsonToken.EndObject: + return "[EndObject]"; + + case JsonToken.Float: + return "[Float] = " + Convert.ToDouble(this.Value); + + case JsonToken.Integer: + return "[Integer] = " + ((int) this.Value); + + case JsonToken.Null: + return "[Null]"; + + case JsonToken.StartArray: + return "[StartArray]"; + + case JsonToken.StartObject: + return "[StartObject]"; + + case JsonToken.String: + return "[String]" + (string) this.Value; + + case JsonToken.PropertyName: + return "[PropertyName]" + (string) this.Value; + } + + return base.ToString(); + } + + #region private methods + + private bool ReadString(char quote) { + StringBuilder buff = new StringBuilder(); + this.token = JsonToken.String; + bool endString = false; + int chr; + + while ((chr = this.reader.Peek()) != -1) { + switch (chr) { + case '\\': + // Read away slash + chr = this.reader.Read(); + + // Read escape code + chr = this.reader.Read(); + switch (chr) { + case 't': + buff.Append('\t'); + break; + + case 'b': + buff.Append('\b'); + break; + + case 'f': + buff.Append('\f'); + break; + + case 'r': + buff.Append('\r'); + break; + + case 'n': + buff.Append('\n'); + break; + + case 'u': + buff.Append((char) Convert.ToInt32(ReadLen(4), 16)); + break; + + default: + buff.Append((char) chr); + break; + } + + break; + + case '\'': + case '"': + if (chr == quote) + endString = true; + + chr = this.reader.Read(); + if (chr != -1 && chr != quote) + buff.Append((char) chr); + + break; + + default: + buff.Append((char) this.reader.Read()); + break; + } + + // String terminated + if (endString) + break; + } + + this.ReadAway(); + + this.val = buff.ToString(); + + // Needed a property + if (this.needProp) { + this.token = JsonToken.PropertyName; + this.needProp = false; + return true; + } + + if (this.location == JsonLocation.InObject && !this.needProp) + this.needProp = true; + + return true; + } + + private bool ReadNull() { + this.token = JsonToken.Null; + this.val = null; + + this.ReadAway(3); // ull + this.ReadAway(); + + if (this.location == JsonLocation.InObject && !this.needProp) + this.needProp = true; + + return true; + } + + private bool ReadNumber(char start) { + StringBuilder buff = new StringBuilder(); + int chr; + bool isFloat = false; + + this.token = JsonToken.Integer; + buff.Append(start); + + while ((chr = this.reader.Peek()) != -1) { + if (Char.IsNumber((char) chr) || (char) chr == '-' || (char) chr == '.') { + if (((char) chr) == '.') + isFloat = true; + + buff.Append((char) this.reader.Read()); + } else + break; + } + + this.ReadAway(); + + if (isFloat) { + this.token = JsonToken.Float; + this.val = Convert.ToDouble(buff.ToString().Replace('.', ',')); + } else + this.val = Convert.ToInt32(buff.ToString()); + + if (this.location == JsonLocation.InObject && !this.needProp) + this.needProp = true; + + return true; + } + + private bool ReadBool(char chr) { + this.token = JsonToken.Boolean; + this.val = chr == 't'; + + if (chr == 't') + this.ReadAway(3); // rue + else + this.ReadAway(4); // alse + + this.ReadAway(); + + if (this.location == JsonLocation.InObject && !this.needProp) + this.needProp = true; + + return true; + } + + private void ReadAway() { + int chr; + + while ((chr = this.reader.Peek()) != -1) { + if (chr != ':' && chr != ',' && !Char.IsWhiteSpace((char) chr)) + break; + + this.reader.Read(); + } + } + + private string ReadLen(int num) { + StringBuilder buff = new StringBuilder(); + int chr; + + for (int i=0; i) { + ((Dictionary)cur)[key] 
+                        } else if (cur is List<object>)
+                            ((List<object>) cur).Add(this.Value);
+                        else
+                            return this.Value;
+
+                        break;
+
+                    case JsonToken.PropertyName:
+                        key = (string) this.Value;
+                        break;
+
+                    case JsonToken.StartArray:
+                    case JsonToken.StartObject:
+                        if (this.TokenType == JsonToken.StartObject) {
+                            obj = new Dictionary<string, object>();
+                        } else {
+                            obj = new List<object>();
+                        }
+
+                        if (cur is Dictionary<string, object>) {
+                            ((Dictionary<string, object>)cur)[key] = obj;
+                        } else if (cur is List<object>) {
+                            ((List<object>)cur).Add(obj);
+                        }
+
+                        parents.Push(cur);
+                        cur = obj;
+
+                        break;
+
+                    case JsonToken.EndArray:
+                    case JsonToken.EndObject:
+                        obj = parents.Pop();
+
+                        if (obj != null)
+                            cur = obj;
+
+                        break;
+                }
+            }
+
+            return cur;
+        }
+
+        #endregion
+    }
+}
-- 
cgit v1.2.3
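
For reference, a minimal usage sketch of the JsonReader class added above, driven through its static ParseJson helper. The JsonReaderDemo wrapper and the sample JSON string are illustrative only and not part of the upstream sources; the sketch assumes the result shapes produced by ReadValue (objects as Dictionary<string, object>, arrays as List<object>, strings as string, whole numbers as boxed int).

    using System;
    using System.Collections.Generic;
    using Moxiecode.Plupload.Utils;

    // Hypothetical demo class, not part of the Plupload sources.
    class JsonReaderDemo {
        static void Main() {
            // Parse a small JSON document with the static helper.
            object result = JsonReader.ParseJson(
                "{\"name\": \"logo.png\", \"size\": 1024, \"chunks\": [1, 2, 3]}");

            // The top-level value here is a JSON object, i.e. a Dictionary<string, object>.
            Dictionary<string, object> file = (Dictionary<string, object>) result;

            Console.WriteLine(file["name"]);                            // logo.png
            Console.WriteLine((int) file["size"]);                      // 1024
            Console.WriteLine(((List<object>) file["chunks"]).Count);   // 3
        }
    }

Note that ReadNumber converts floating-point values with Convert.ToDouble after replacing '.' with ',', so float handling depends on the current culture; the sample therefore sticks to strings and integers.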