// Copyright 2013 Lovell Fuller and others.
// SPDX-License-Identifier: Apache-2.0

'use strict';

const color = require('color');
const is = require('./is');

/**
 * How accurate an operation should be.
 * @member
 * @private
 */
const vipsPrecision = {
  integer: 'integer',
  float: 'float',
  approximate: 'approximate'
};

/**
 * Rotate the output image by either an explicit angle
 * or auto-orient based on the EXIF `Orientation` tag.
 *
 * If an angle is provided, it is converted to a valid positive degree rotation.
 * For example, `-450` will produce a 270 degree rotation.
 *
 * When rotating by an angle other than a multiple of 90,
 * the background colour can be provided with the `background` option.
 *
 * If no angle is provided, it is determined from the EXIF data.
 * Mirroring is supported and may infer the use of a flip operation.
 *
 * The use of `rotate` without an angle will remove the EXIF `Orientation` tag, if any.
 *
 * Only one rotation can occur per pipeline.
 * Previous calls to `rotate` in the same pipeline will be ignored.
 *
 * Multi-page images can only be rotated by 180 degrees.
 *
 * Method order is important when rotating, resizing and/or extracting regions,
 * for example `.rotate(x).extract(y)` will produce a different result to `.extract(y).rotate(x)`.
 *
 * @example
 * const pipeline = sharp()
 *   .rotate()
 *   .resize(null, 200)
 *   .toBuffer(function (err, outputBuffer, info) {
 *     // outputBuffer contains 200px high JPEG image data,
 *     // auto-rotated using EXIF Orientation tag
 *     // info.width and info.height contain the dimensions of the resized image
 *   });
 * readableStream.pipe(pipeline);
 *
 * @example
 * const rotateThenResize = await sharp(input)
 *   .rotate(90)
 *   .resize({ width: 16, height: 8, fit: 'fill' })
 *   .toBuffer();
 * const resizeThenRotate = await sharp(input)
 *   .resize({ width: 16, height: 8, fit: 'fill' })
 *   .rotate(90)
 *   .toBuffer();
 *
 * @param {number} [angle=auto] angle of rotation.
 * @param {Object} [options] - if present, is an Object with optional attributes.
 * @param {string|Object} [options.background="#000000"] parsed by the [color](https://www.npmjs.org/package/color) module to extract values for red, green, blue and alpha.
 * @returns {Sharp}
 * @throws {Error} Invalid parameters
 */
function rotate (angle, options) {
  if (this.options.useExifOrientation || this.options.angle || this.options.rotationAngle) {
    this.options.debuglog('ignoring previous rotate options');
  }
  if (!is.defined(angle)) {
    this.options.useExifOrientation = true;
  } else if (is.integer(angle) && !(angle % 90)) {
    this.options.angle = angle;
  } else if (is.number(angle)) {
    this.options.rotationAngle = angle;
    if (is.object(options) && options.background) {
      const backgroundColour = color(options.background);
      this.options.rotationBackground = [
        backgroundColour.red(),
        backgroundColour.green(),
        backgroundColour.blue(),
        Math.round(backgroundColour.alpha() * 255)
      ];
    }
  } else {
    throw is.invalidParameterError('angle', 'numeric', angle);
  }
  return this;
}

/**
 * Mirror the image vertically (up-down) about the x-axis.
 * This always occurs before rotation, if any.
 *
 * This operation does not work correctly with multi-page images.
 *
 * @example
 * const output = await sharp(input).flip().toBuffer();
 *
 * @param {Boolean} [flip=true]
 * @returns {Sharp}
 */
function flip (flip) {
  this.options.flip = is.bool(flip) ? flip : true;
  return this;
}

/**
 * Mirror the image horizontally (left-right) about the y-axis.
 * This always occurs before rotation, if any.
 *
 * @example
 * const output = await sharp(input).flop().toBuffer();
 *
 * @param {Boolean} [flop=true]
 * @returns {Sharp}
 */
function flop (flop) {
  this.options.flop = is.bool(flop) ? flop : true;
  return this;
}

/**
 * Perform an affine transform on an image. This operation will always occur after resizing, extraction and rotation, if any.
 *
 * You must provide an array of length 4 or a 2x2 affine transformation matrix.
 * By default, new pixels are filled with a black background. You can provide a background colour with the `background` option.
 * A particular interpolator may also be specified. Set the `interpolator` option to an attribute of the `sharp.interpolators` Object e.g. `sharp.interpolators.nohalo`.
 *
 * In the case of a 2x2 matrix, the transform is:
 * - X = `matrix[0, 0]` \* (x + `idx`) + `matrix[0, 1]` \* (y + `idy`) + `odx`
 * - Y = `matrix[1, 0]` \* (x + `idx`) + `matrix[1, 1]` \* (y + `idy`) + `ody`
 *
 * where:
 * - x and y are the coordinates in the input image.
 * - X and Y are the coordinates in the output image.
 * - (0,0) is the upper left corner.
 *
 * @since 0.27.0
 *
 * @example
 * const pipeline = sharp()
 *   .affine([[1, 0.3], [0.1, 0.7]], {
 *     background: 'white',
 *     interpolator: sharp.interpolators.nohalo
 *   })
 *   .toBuffer((err, outputBuffer, info) => {
 *     // outputBuffer contains the transformed image
 *     // info.width and info.height contain the new dimensions
 *   });
 *
 * inputStream
 *   .pipe(pipeline);
 *
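 * @example
 * // Illustrative sketch of the offset options only; `input` is assumed to be an
 * // image Buffer or file path, as in the other examples in this file.
 * const shifted = await sharp(input)
 *   .affine([1, 0, 0, 1], { idx: 15, idy: 15, interpolator: sharp.interpolators.nearest })
 *   .toBuffer();
 *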
 * @param {Array<Array<number>>|Array<number>} matrix - affine transformation matrix
 * @param {Object} [options] - if present, is an Object with optional attributes.
 * @param {String|Object} [options.background="#000000"] - parsed by the [color](https://www.npmjs.org/package/color) module to extract values for red, green, blue and alpha.
 * @param {Number} [options.idx=0] - input horizontal offset
 * @param {Number} [options.idy=0] - input vertical offset
 * @param {Number} [options.odx=0] - output horizontal offset
 * @param {Number} [options.ody=0] - output vertical offset
 * @param {String} [options.interpolator=sharp.interpolators.bicubic] - interpolator
 * @returns {Sharp}
 * @throws {Error} Invalid parameters
 */
function affine (matrix, options) {
  const flatMatrix = [].concat(...matrix);
  if (flatMatrix.length === 4 && flatMatrix.every(is.number)) {
    this.options.affineMatrix = flatMatrix;
  } else {
    throw is.invalidParameterError('matrix', '1x4 or 2x2 array', matrix);
  }
  if (is.defined(options)) {
    if (is.object(options)) {
      this._setBackgroundColourOption('affineBackground', options.background);
      if (is.defined(options.idx)) {
        if (is.number(options.idx)) {
          this.options.affineIdx = options.idx;
        } else {
          throw is.invalidParameterError('options.idx', 'number', options.idx);
        }
      }
      if (is.defined(options.idy)) {
        if (is.number(options.idy)) {
          this.options.affineIdy = options.idy;
        } else {
          throw is.invalidParameterError('options.idy', 'number', options.idy);
        }
      }
      if (is.defined(options.odx)) {
        if (is.number(options.odx)) {
          this.options.affineOdx = options.odx;
        } else {
          throw is.invalidParameterError('options.odx', 'number', options.odx);
        }
      }
      if (is.defined(options.ody)) {
        if (is.number(options.ody)) {
          this.options.affineOdy = options.ody;
        } else {
          throw is.invalidParameterError('options.ody', 'number', options.ody);
        }
      }
      if (is.defined(options.interpolator)) {
        if (is.inArray(options.interpolator, Object.values(this.constructor.interpolators))) {
          this.options.affineInterpolator = options.interpolator;
        } else {
          throw is.invalidParameterError('options.interpolator', 'valid interpolator name', options.interpolator);
        }
      }
    } else {
      throw is.invalidParameterError('options', 'object', options);
    }
  }
  return this;
}

/**
 * Sharpen the image.
 *
 * When used without parameters, performs a fast, mild sharpen of the output image.
 *
 * When a `sigma` is provided, performs a slower, more accurate sharpen of the L channel in the LAB colour space.
 * Fine-grained control over the level of sharpening in "flat" (m1) and "jagged" (m2) areas is available.
 *
 * See {@link https://www.libvips.org/API/current/libvips-convolution.html#vips-sharpen|libvips sharpen} operation.
 *
 * @example
 * const data = await sharp(input).sharpen().toBuffer();
 *
 * @example
 * const data = await sharp(input).sharpen({ sigma: 2 }).toBuffer();
 *
 * @example
 * const data = await sharp(input)
 *   .sharpen({
 *     sigma: 2,
 *     m1: 0,
 *     m2: 3,
 *     x1: 3,
 *     y2: 15,
 *     y3: 15,
 *   })
 *   .toBuffer();
 *
 * @param {Object|number} [options] - if present, is an Object with attributes
 * @param {number} [options.sigma] - the sigma of the Gaussian mask, where `sigma = 1 + radius / 2`, between 0.000001 and 10
 * @param {number} [options.m1=1.0] - the level of sharpening to apply to "flat" areas, between 0 and 1000000
 * @param {number} [options.m2=2.0] - the level of sharpening to apply to "jagged" areas, between 0 and 1000000
 * @param {number} [options.x1=2.0] - threshold between "flat" and "jagged", between 0 and 1000000
 * @param {number} [options.y2=10.0] - maximum amount of brightening, between 0 and 1000000
 * @param {number} [options.y3=20.0] - maximum amount of darkening, between 0 and 1000000
 * @param {number} [flat] - (deprecated) see `options.m1`.
 * @param {number} [jagged] - (deprecated) see `options.m2`.
 * @returns {Sharp}
 * @throws {Error} Invalid parameters
 */
function sharpen (options, flat, jagged) {
  if (!is.defined(options)) {
    // No arguments: default to mild sharpen
    this.options.sharpenSigma = -1;
  } else if (is.bool(options)) {
    // Deprecated boolean argument: apply mild sharpen?
    this.options.sharpenSigma = options ? -1 : 0;
  } else if (is.number(options) && is.inRange(options, 0.01, 10000)) {
    // Deprecated numeric argument: specific sigma
    this.options.sharpenSigma = options;
    // Deprecated control over flat areas
    if (is.defined(flat)) {
      if (is.number(flat) && is.inRange(flat, 0, 10000)) {
        this.options.sharpenM1 = flat;
      } else {
        throw is.invalidParameterError('flat', 'number between 0 and 10000', flat);
      }
    }
    // Deprecated control over jagged areas
    if (is.defined(jagged)) {
      if (is.number(jagged) && is.inRange(jagged, 0, 10000)) {
        this.options.sharpenM2 = jagged;
      } else {
        throw is.invalidParameterError('jagged', 'number between 0 and 10000', jagged);
      }
    }
  } else if (is.plainObject(options)) {
    if (is.number(options.sigma) && is.inRange(options.sigma, 0.000001, 10)) {
      this.options.sharpenSigma = options.sigma;
    } else {
      throw is.invalidParameterError('options.sigma', 'number between 0.000001 and 10', options.sigma);
    }
    if (is.defined(options.m1)) {
      if (is.number(options.m1) && is.inRange(options.m1, 0, 1000000)) {
        this.options.sharpenM1 = options.m1;
      } else {
        throw is.invalidParameterError('options.m1', 'number between 0 and 1000000', options.m1);
      }
    }
    if (is.defined(options.m2)) {
      if (is.number(options.m2) && is.inRange(options.m2, 0, 1000000)) {
        this.options.sharpenM2 = options.m2;
      } else {
        throw is.invalidParameterError('options.m2', 'number between 0 and 1000000', options.m2);
      }
    }
    if (is.defined(options.x1)) {
      if (is.number(options.x1) && is.inRange(options.x1, 0, 1000000)) {
        this.options.sharpenX1 = options.x1;
      } else {
        throw is.invalidParameterError('options.x1', 'number between 0 and 1000000', options.x1);
      }
    }
    if (is.defined(options.y2)) {
      if (is.number(options.y2) && is.inRange(options.y2, 0, 1000000)) {
        this.options.sharpenY2 = options.y2;
      } else {
        throw is.invalidParameterError('options.y2', 'number between 0 and 1000000', options.y2);
      }
    }
    if (is.defined(options.y3)) {
      if (is.number(options.y3) && is.inRange(options.y3, 0, 1000000)) {
        this.options.sharpenY3 = options.y3;
      } else {
        throw is.invalidParameterError('options.y3', 'number between 0 and 1000000', options.y3);
      }
    }
  } else {
    throw is.invalidParameterError('sigma', 'number between 0.01 and 10000', options);
  }
  return this;
}

/**
 * Apply a median filter.
 * When used without parameters the default window is 3x3.
 *
 * @example
 * const output = await sharp(input).median().toBuffer();
 *
 * @example
 * const output = await sharp(input).median(5).toBuffer();
 *
 * @param {number} [size=3] square mask size: size x size
 * @returns {Sharp}
 * @throws {Error} Invalid parameters
 */
function median (size) {
  if (!is.defined(size)) {
    // No arguments: default to 3x3
    this.options.medianSize = 3;
  } else if (is.integer(size) && is.inRange(size, 1, 1000)) {
    // Integer argument: specific window size
    this.options.medianSize = size;
  } else {
    throw is.invalidParameterError('size', 'integer between 1 and 1000', size);
  }
  return this;
}

/**
 * Blur the image.
 *
 * When used without parameters, performs a fast 3x3 box blur (equivalent to a box linear filter).
 *
 * When a `sigma` is provided, performs a slower, more accurate Gaussian blur.
 *
 * @example
 * const boxBlurred = await sharp(input)
 *   .blur()
 *   .toBuffer();
 *
 * @example
 * const gaussianBlurred = await sharp(input)
 *   .blur(5)
 *   .toBuffer();
 *
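 * @example
 * // Illustrative sketch of the options form; `input` is assumed to be an image
 * // Buffer or file path, as in the other examples. A larger minAmplitude and the
 * // 'approximate' precision trade accuracy for speed.
 * const fasterGaussianBlurred = await sharp(input)
 *   .blur({ sigma: 5, precision: 'approximate', minAmplitude: 0.5 })
 *   .toBuffer();
 *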
 * @param {Object|number|Boolean} [options]
 * @param {number} [options.sigma] a value between 0.3 and 1000 representing the sigma of the Gaussian mask, where `sigma = 1 + radius / 2`.
 * @param {string} [options.precision='integer'] How accurate the operation should be, one of: integer, float, approximate.
 * @param {number} [options.minAmplitude=0.2] A value between 0.001 and 1. A smaller value will generate a larger, more accurate mask.
 * @returns {Sharp}
 * @throws {Error} Invalid parameters
 */
function blur (options) {
  let sigma;
  if (is.number(options)) {
    sigma = options;
  } else if (is.plainObject(options)) {
    if (!is.number(options.sigma)) {
      throw is.invalidParameterError('options.sigma', 'number between 0.3 and 1000', options.sigma);
    }
    sigma = options.sigma;
    if ('precision' in options) {
      if (is.string(vipsPrecision[options.precision])) {
        this.options.precision = vipsPrecision[options.precision];
      } else {
        throw is.invalidParameterError('precision', 'one of: integer, float, approximate', options.precision);
      }
    }
    if ('minAmplitude' in options) {
      if (is.number(options.minAmplitude) && is.inRange(options.minAmplitude, 0.001, 1)) {
        this.options.minAmpl = options.minAmplitude;
      } else {
        throw is.invalidParameterError('minAmplitude', 'number between 0.001 and 1', options.minAmplitude);
      }
    }
  }
  if (!is.defined(options)) {
    // No arguments: default to mild blur
    this.options.blurSigma = -1;
  } else if (is.bool(options)) {
    // Boolean argument: apply mild blur?
    this.options.blurSigma = options ? -1 : 0;
  } else if (is.number(sigma) && is.inRange(sigma, 0.3, 1000)) {
    // Numeric argument: specific sigma
    this.options.blurSigma = sigma;
  } else {
    throw is.invalidParameterError('sigma', 'number between 0.3 and 1000', sigma);
  }
  return this;
}

/**
 * Merge alpha transparency channel, if any, with a background, then remove the alpha channel.
 *
 * See also {@link /api-channel#removealpha|removeAlpha}.
 *
 * @example
 * await sharp(rgbaInput)
 *   .flatten({ background: '#F0A703' })
 *   .toBuffer();
 *
 * @param {Object} [options]
 * @param {string|Object} [options.background={r: 0, g: 0, b: 0}] - background colour, parsed by the [color](https://www.npmjs.org/package/color) module, defaults to black.
 * @returns {Sharp}
 */
function flatten (options) {
  this.options.flatten = is.bool(options) ? options : true;
  if (is.object(options)) {
    this._setBackgroundColourOption('flattenBackground', options.background);
  }
  return this;
}

/**
 * Ensure the image has an alpha channel
 * with all white pixel values made fully transparent.
 *
 * Existing alpha channel values for non-white pixels remain unchanged.
 *
 * This feature is experimental and the API may change.
 *
 * @since 0.32.1
 *
 * @example
 * await sharp(rgbInput)
 *   .unflatten()
 *   .toBuffer();
 *
 * @example
 * await sharp(rgbInput)
 *   .threshold(128, { grayscale: false }) // convert bright pixels to white
 *   .unflatten()
 *   .toBuffer();
 *
 * @returns {Sharp}
 */
function unflatten () {
  this.options.unflatten = true;
  return this;
}

/**
 * Apply a gamma correction by reducing the encoding (darken) pre-resize at a factor of `1/gamma`
 * then increasing the encoding (brighten) post-resize at a factor of `gamma`.
 * This can improve the perceived brightness of a resized image in non-linear colour spaces.
 * JPEG and WebP input images will not take advantage of the shrink-on-load performance optimisation
 * when applying a gamma correction.
 *
 * Supply a second argument to use a different output gamma value, otherwise the first value is used in both cases.
 *
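 * @example
 * // Illustrative sketch; `input` is assumed to be an image Buffer or file path,
 * // as in the other examples. Applies the default 2.2 gamma around a resize.
 * const gammaResized = await sharp(input)
 *   .gamma()
 *   .resize(200)
 *   .toBuffer();
 *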
 * @param {number} [gamma=2.2] value between 1.0 and 3.0.
 * @param {number} [gammaOut] value between 1.0 and 3.0. (optional, defaults to same as `gamma`)
 * @returns {Sharp}
 * @throws {Error} Invalid parameters
 */
function gamma (gamma, gammaOut) {
  if (!is.defined(gamma)) {
    // Default gamma correction of 2.2 (sRGB)
    this.options.gamma = 2.2;
  } else if (is.number(gamma) && is.inRange(gamma, 1, 3)) {
    this.options.gamma = gamma;
  } else {
    throw is.invalidParameterError('gamma', 'number between 1.0 and 3.0', gamma);
  }
  if (!is.defined(gammaOut)) {
    // Default gamma correction for output is same as input
    this.options.gammaOut = this.options.gamma;
  } else if (is.number(gammaOut) && is.inRange(gammaOut, 1, 3)) {
    this.options.gammaOut = gammaOut;
  } else {
    throw is.invalidParameterError('gammaOut', 'number between 1.0 and 3.0', gammaOut);
  }
  return this;
}

/**
 * Produce the "negative" of the image.
 *
 * @example
 * const output = await sharp(input)
 *   .negate()
 *   .toBuffer();
 *
 * @example
 * const output = await sharp(input)
 *   .negate({ alpha: false })
 *   .toBuffer();
 *
 * @param {Object} [options]
 * @param {Boolean} [options.alpha=true] Whether or not to negate any alpha channel
 * @returns {Sharp}
 */
function negate (options) {
  this.options.negate = is.bool(options) ? options : true;
  if (is.plainObject(options) && 'alpha' in options) {
    if (!is.bool(options.alpha)) {
      throw is.invalidParameterError('alpha', 'should be boolean value', options.alpha);
    } else {
      this.options.negateAlpha = options.alpha;
    }
  }
  return this;
}

/**
 * Enhance output image contrast by stretching its luminance to cover a full dynamic range.
 *
 * Uses a histogram-based approach, taking a default range of 1% to 99% to reduce sensitivity to noise at the extremes.
 *
 * Luminance values below the `lower` percentile will be underexposed by clipping to zero.
 * Luminance values above the `upper` percentile will be overexposed by clipping to the max pixel value.
 *
 * @example
 * const output = await sharp(input)
 *   .normalise()
 *   .toBuffer();
 *
 * @example
 * const output = await sharp(input)
 *   .normalise({ lower: 0, upper: 100 })
 *   .toBuffer();
 *
 * @param {Object} [options]
 * @param {number} [options.lower=1] - Percentile below which luminance values will be underexposed.
 * @param {number} [options.upper=99] - Percentile above which luminance values will be overexposed.
 * @returns {Sharp}
 */
function normalise (options) {
  if (is.plainObject(options)) {
    if (is.defined(options.lower)) {
      if (is.number(options.lower) && is.inRange(options.lower, 0, 99)) {
        this.options.normaliseLower = options.lower;
      } else {
        throw is.invalidParameterError('lower', 'number between 0 and 99', options.lower);
      }
    }
    if (is.defined(options.upper)) {
      if (is.number(options.upper) && is.inRange(options.upper, 1, 100)) {
        this.options.normaliseUpper = options.upper;
      } else {
        throw is.invalidParameterError('upper', 'number between 1 and 100', options.upper);
      }
    }
  }
  if (this.options.normaliseLower >= this.options.normaliseUpper) {
    throw is.invalidParameterError('range', 'lower to be less than upper',
      `${this.options.normaliseLower} >= ${this.options.normaliseUpper}`);
  }
  this.options.normalise = true;
  return this;
}

/**
 * Alternative spelling of normalise.
 *
 * @example
 * const output = await sharp(input)
 *   .normalize()
 *   .toBuffer();
 *
 * @param {Object} [options]
 * @param {number} [options.lower=1] - Percentile below which luminance values will be underexposed.
 * @param {number} [options.upper=99] - Percentile above which luminance values will be overexposed.
 * @returns {Sharp}
 */
function normalize (options) {
  return this.normalise(options);
}

/**
 * Perform contrast limiting adaptive histogram equalization
 * {@link https://en.wikipedia.org/wiki/Adaptive_histogram_equalization#Contrast_Limited_AHE|CLAHE}.
 *
 * This will, in general, enhance the clarity of the image by bringing out darker details.
 *
 * @since 0.28.3
 *
 * @example
 * const output = await sharp(input)
 *   .clahe({
 *     width: 3,
 *     height: 3,
 *   })
 *   .toBuffer();
 *
 * @param {Object} options
 * @param {number} options.width - Integral width of the search window, in pixels.
 * @param {number} options.height - Integral height of the search window, in pixels.
 * @param {number} [options.maxSlope=3] - Integral level of brightening, between 0 and 100, where 0 disables contrast limiting.
 * @returns {Sharp}
 * @throws {Error} Invalid parameters
 */
function clahe (options) {
  if (is.plainObject(options)) {
    if (is.integer(options.width) && options.width > 0) {
      this.options.claheWidth = options.width;
    } else {
      throw is.invalidParameterError('width', 'integer greater than zero', options.width);
    }
    if (is.integer(options.height) && options.height > 0) {
      this.options.claheHeight = options.height;
    } else {
      throw is.invalidParameterError('height', 'integer greater than zero', options.height);
    }
    if (is.defined(options.maxSlope)) {
      if (is.integer(options.maxSlope) && is.inRange(options.maxSlope, 0, 100)) {
        this.options.claheMaxSlope = options.maxSlope;
      } else {
        throw is.invalidParameterError('maxSlope', 'integer between 0 and 100', options.maxSlope);
      }
    }
  } else {
    throw is.invalidParameterError('options', 'plain object', options);
  }
  return this;
}

/**
 * Convolve the image with the specified kernel.
 *
 * @example
 * sharp(input)
 *   .convolve({
 *     width: 3,
 *     height: 3,
 *     kernel: [-1, 0, 1, -2, 0, 2, -1, 0, 1]
 *   })
 *   .raw()
 *   .toBuffer(function (err, data, info) {
 *     // data contains the raw pixel data representing the convolution
 *     // of the input image with the horizontal Sobel operator
 *   });
 *
 * @param {Object} kernel
 * @param {number} kernel.width - width of the kernel in pixels.
 * @param {number} kernel.height - height of the kernel in pixels.
 * @param {Array<number>} kernel.kernel - Array of length `width*height` containing the kernel values.
 * @param {number} [kernel.scale=sum] - the scale of the kernel in pixels.
 * @param {number} [kernel.offset=0] - the offset of the kernel in pixels.
 * @returns {Sharp}
 * @throws {Error} Invalid parameters
 */
function convolve (kernel) {
  if (!is.object(kernel) || !Array.isArray(kernel.kernel) ||
    !is.integer(kernel.width) || !is.integer(kernel.height) ||
    !is.inRange(kernel.width, 3, 1001) || !is.inRange(kernel.height, 3, 1001) ||
    kernel.height * kernel.width !== kernel.kernel.length
  ) {
    // must pass in a kernel
    throw new Error('Invalid convolution kernel');
  }
  // Default scale is sum of kernel values
  if (!is.integer(kernel.scale)) {
    kernel.scale = kernel.kernel.reduce(function (a, b) {
      return a + b;
    }, 0);
  }
  // Clip scale to a minimum value of 1
  if (kernel.scale < 1) {
    kernel.scale = 1;
  }
  if (!is.integer(kernel.offset)) {
    kernel.offset = 0;
  }
  this.options.convKernel = kernel;
  return this;
}

/**
 * Any pixel value greater than or equal to the threshold value will be set to 255, otherwise it will be set to 0.
 *
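 * @example
 * // Illustrative sketch; `input` is assumed to be an image Buffer or file path,
 * // as in the other examples. Binarise at the default level of 128, then at an
 * // explicit level while keeping the original channels.
 * const binarised = await sharp(input).threshold().toBuffer();
 * const thresholdedColour = await sharp(input)
 *   .threshold(192, { greyscale: false })
 *   .toBuffer();
 *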
 * @param {number} [threshold=128] - a value in the range 0-255 representing the level at which the threshold will be applied.
 * @param {Object} [options]
 * @param {Boolean} [options.greyscale=true] - convert to single channel greyscale.
 * @param {Boolean} [options.grayscale=true] - alternative spelling for greyscale.
 * @returns {Sharp}
 * @throws {Error} Invalid parameters
 */
function threshold (threshold, options) {
  if (!is.defined(threshold)) {
    this.options.threshold = 128;
  } else if (is.bool(threshold)) {
    this.options.threshold = threshold ? 128 : 0;
  } else if (is.integer(threshold) && is.inRange(threshold, 0, 255)) {
    this.options.threshold = threshold;
  } else {
    throw is.invalidParameterError('threshold', 'integer between 0 and 255', threshold);
  }
  if (!is.object(options) || options.greyscale === true || options.grayscale === true) {
    this.options.thresholdGrayscale = true;
  } else {
    this.options.thresholdGrayscale = false;
  }
  return this;
}

/**
 * Perform a bitwise boolean operation with operand image.
 *
 * This operation creates an output image where each pixel is the result of
 * the selected bitwise boolean `operation` between the corresponding pixels of the input images.
 *
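 * @example
 * // Illustrative sketch; `maskImage` is a hypothetical operand image
 * // (Buffer or file path). Keeps only the bits set in both images.
 * const masked = await sharp(input)
 *   .boolean(maskImage, 'and')
 *   .toBuffer();
 *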
 * @param {Buffer|string} operand - Buffer containing image data or string containing the path to an image file.
 * @param {string} operator - one of `and`, `or` or `eor` to perform that bitwise operation, like the C logic operators `&`, `|` and `^` respectively.
 * @param {Object} [options]
 * @param {Object} [options.raw] - describes operand when using raw pixel data.
 * @param {number} [options.raw.width]
 * @param {number} [options.raw.height]
 * @param {number} [options.raw.channels]
 * @returns {Sharp}
 * @throws {Error} Invalid parameters
 */
function boolean (operand, operator, options) {
  this.options.boolean = this._createInputDescriptor(operand, options);
  if (is.string(operator) && is.inArray(operator, ['and', 'or', 'eor'])) {
    this.options.booleanOp = operator;
  } else {
    throw is.invalidParameterError('operator', 'one of: and, or, eor', operator);
  }
  return this;
}

/**
 * Apply the linear formula `a` * input + `b` to the image to adjust image levels.
 *
 * When a single number is provided, it will be used for all image channels.
 * When an array of numbers is provided, the array length must match the number of channels.
 *
 * @example
 * await sharp(input)
 *   .linear(0.5, 2)
 *   .toBuffer();
 *
 * @example
 * await sharp(rgbInput)
 *   .linear(
 *     [0.25, 0.5, 0.75],
 *     [150, 100, 50]
 *   )
 *   .toBuffer();
 *
 * @param {(number|number[])} [a=[]] multiplier
 * @param {(number|number[])} [b=[]] offset
 * @returns {Sharp}
 * @throws {Error} Invalid parameters
 */
function linear (a, b) {
  if (!is.defined(a) && is.number(b)) {
    a = 1.0;
  } else if (is.number(a) && !is.defined(b)) {
    b = 0.0;
  }
  if (!is.defined(a)) {
    this.options.linearA = [];
  } else if (is.number(a)) {
    this.options.linearA = [a];
  } else if (Array.isArray(a) && a.length && a.every(is.number)) {
    this.options.linearA = a;
  } else {
    throw is.invalidParameterError('a', 'number or array of numbers', a);
  }
  if (!is.defined(b)) {
    this.options.linearB = [];
  } else if (is.number(b)) {
    this.options.linearB = [b];
  } else if (Array.isArray(b) && b.length && b.every(is.number)) {
    this.options.linearB = b;
  } else {
    throw is.invalidParameterError('b', 'number or array of numbers', b);
  }
  if (this.options.linearA.length !== this.options.linearB.length) {
    throw new Error('Expected a and b to be arrays of the same length');
  }
  return this;
}

/**
 * Recombine the image with the specified matrix.
 *
 * @since 0.21.1
 *
 * @example
 * sharp(input)
 *   .recomb([
 *     [0.3588, 0.7044, 0.1368],
 *     [0.2990, 0.5870, 0.1140],
 *     [0.2392, 0.4696, 0.0912],
 *   ])
 *   .raw()
 *   .toBuffer(function (err, data, info) {
 *     // data contains the raw pixel data after applying the matrix
 *     // With this example input, a sepia filter has been applied
 *   });
 *
 * @param {Array<Array<number>>} inputMatrix - 3x3 or 4x4 Recombination matrix
 * @returns {Sharp}
 * @throws {Error} Invalid parameters
 */
function recomb (inputMatrix) {
  if (!Array.isArray(inputMatrix)) {
    throw is.invalidParameterError('inputMatrix', 'array', inputMatrix);
  }
  if (inputMatrix.length !== 3 && inputMatrix.length !== 4) {
    throw is.invalidParameterError('inputMatrix', '3x3 or 4x4 array', inputMatrix.length);
  }
  const recombMatrix = inputMatrix.flat().map(Number);
  if (recombMatrix.length !== 9 && recombMatrix.length !== 16) {
    throw is.invalidParameterError('inputMatrix', 'cardinality of 9 or 16', recombMatrix.length);
  }
  this.options.recombMatrix = recombMatrix;
  return this;
}

/**
 * Transforms the image using brightness, saturation, hue rotation, and lightness.
 * Brightness and lightness both operate on luminance, with the difference being that
 * brightness is multiplicative whereas lightness is additive.
 *
 * @since 0.22.1
 *
 * @example
 * // increase brightness by a factor of 2
 * const output = await sharp(input)
 *   .modulate({
 *     brightness: 2
 *   })
 *   .toBuffer();
 *
 * @example
 * // hue-rotate by 180 degrees
 * const output = await sharp(input)
 *   .modulate({
 *     hue: 180
 *   })
 *   .toBuffer();
 *
 * @example
 * // increase lightness by +50
 * const output = await sharp(input)
 *   .modulate({
 *     lightness: 50
 *   })
 *   .toBuffer();
 *
 * @example
 * // decrease brightness and saturation while also hue-rotating by 90 degrees
 * const output = await sharp(input)
 *   .modulate({
 *     brightness: 0.5,
 *     saturation: 0.5,
 *     hue: 90,
 *   })
 *   .toBuffer();
 *
 * @param {Object} [options]
 * @param {number} [options.brightness] Brightness multiplier
 * @param {number} [options.saturation] Saturation multiplier
 * @param {number} [options.hue] Degrees for hue rotation
 * @param {number} [options.lightness] Lightness addend
 * @returns {Sharp}
 */
function modulate (options) {
  if (!is.plainObject(options)) {
    throw is.invalidParameterError('options', 'plain object', options);
  }
  if ('brightness' in options) {
    if (is.number(options.brightness) && options.brightness >= 0) {
      this.options.brightness = options.brightness;
    } else {
      throw is.invalidParameterError('brightness', 'number above zero', options.brightness);
    }
  }
  if ('saturation' in options) {
    if (is.number(options.saturation) && options.saturation >= 0) {
      this.options.saturation = options.saturation;
    } else {
      throw is.invalidParameterError('saturation', 'number above zero', options.saturation);
    }
  }
  if ('hue' in options) {
    if (is.integer(options.hue)) {
      this.options.hue = options.hue % 360;
    } else {
      throw is.invalidParameterError('hue', 'number', options.hue);
    }
  }
  if ('lightness' in options) {
    if (is.number(options.lightness)) {
      this.options.lightness = options.lightness;
    } else {
      throw is.invalidParameterError('lightness', 'number', options.lightness);
    }
  }
  return this;
}

/**
 * Decorate the Sharp prototype with operation-related functions.
 * @private
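 * @example
 * // Illustrative sketch only: a consuming module applies this decorator to the
 * // Sharp constructor roughly like this (the exact wiring lives in the entry point).
 * // require('./operation')(Sharp);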
 */
module.exports = function (Sharp) {
  Object.assign(Sharp.prototype, {
    rotate,
    flip,
    flop,
    affine,
    sharpen,
    median,
    blur,
    flatten,
    unflatten,
    gamma,
    negate,
    normalise,
    normalize,
    clahe,
    convolve,
    threshold,
    boolean,
    linear,
    recomb,
    modulate
  });
};