From 53c162b4a2807d51c5885118ab0a10dc7943dc14 Mon Sep 17 00:00:00 2001 From: Eduardo Montalvo Date: Fri, 3 Nov 2017 14:02:16 -0600 Subject: [PATCH 1/7] Add spanish translation skeleton --- translations/spanish/README.md | 674 ++++++++++++++++++ .../sections/drafts/readme-general-toc-1.md | 84 +++ .../sections/drafts/readme-general-toc-2.md | 78 ++ .../sections/drafts/readme-general-toc-3.md | 82 +++ .../sections/drafts/readme-general-toc-4.md | 115 +++ .../sections/errorhandling/apmproducts.md | 29 + .../errorhandling/asyncerrorhandling.md | 56 ++ .../catchunhandledpromiserejection.md | 58 ++ .../errorhandling/centralizedhandling.md | 83 +++ .../errorhandling/documentingusingswagger.md | 15 + .../sections/errorhandling/failfast.md | 50 ++ .../sections/errorhandling/monitoring.md | 18 + .../operationalvsprogrammererror.md | 51 ++ .../errorhandling/shuttingtheprocess.md | 54 ++ .../errorhandling/testingerrorflows.md | 37 + .../sections/errorhandling/usematurelogger.md | 51 ++ .../errorhandling/useonlythebuiltinerror.md | 78 ++ .../sections/production/apmproducts.md | 27 + .../production/assigntransactionid.md | 41 ++ .../sections/production/bestateless.md | 39 + .../production/createmaintenanceendpoint.md | 35 + .../sections/production/delegatetoproxy.md | 50 ++ .../production/detectvulnerabilities.md | 26 + .../sections/production/frontendout.md | 41 ++ .../sections/production/guardprocess.md | 19 + .../sections/production/lockdependencies.md | 74 ++ .../sections/production/measurememory.md | 26 + .../spanish/sections/production/monitoring.md | 39 + .../sections/production/productoncode.md | 17 + .../spanish/sections/production/setnodeenv.md | 35 + .../sections/production/smartlogging.md | 43 ++ .../spanish/sections/production/utilizecpu.md | 27 + .../projectstructre/breakintcomponents.md | 26 + .../sections/projectstructre/configguide.md | 34 + .../sections/projectstructre/createlayers.md | 11 + .../projectstructre/separateexpress.md | 47 ++ .../projectstructre/thincomponents.md | 26 + .../sections/projectstructre/wraputilities.md | 14 + translations/spanish/sections/template.md | 40 ++ .../sections/testingandquality/bumpversion.md | 30 + 40 files changed, 2380 insertions(+) create mode 100644 translations/spanish/README.md create mode 100644 translations/spanish/sections/drafts/readme-general-toc-1.md create mode 100644 translations/spanish/sections/drafts/readme-general-toc-2.md create mode 100644 translations/spanish/sections/drafts/readme-general-toc-3.md create mode 100644 translations/spanish/sections/drafts/readme-general-toc-4.md create mode 100644 translations/spanish/sections/errorhandling/apmproducts.md create mode 100644 translations/spanish/sections/errorhandling/asyncerrorhandling.md create mode 100644 translations/spanish/sections/errorhandling/catchunhandledpromiserejection.md create mode 100644 translations/spanish/sections/errorhandling/centralizedhandling.md create mode 100644 translations/spanish/sections/errorhandling/documentingusingswagger.md create mode 100644 translations/spanish/sections/errorhandling/failfast.md create mode 100644 translations/spanish/sections/errorhandling/monitoring.md create mode 100644 translations/spanish/sections/errorhandling/operationalvsprogrammererror.md create mode 100644 translations/spanish/sections/errorhandling/shuttingtheprocess.md create mode 100644 translations/spanish/sections/errorhandling/testingerrorflows.md create mode 100644 translations/spanish/sections/errorhandling/usematurelogger.md create mode 100644 
translations/spanish/sections/errorhandling/useonlythebuiltinerror.md create mode 100644 translations/spanish/sections/production/apmproducts.md create mode 100644 translations/spanish/sections/production/assigntransactionid.md create mode 100644 translations/spanish/sections/production/bestateless.md create mode 100644 translations/spanish/sections/production/createmaintenanceendpoint.md create mode 100644 translations/spanish/sections/production/delegatetoproxy.md create mode 100644 translations/spanish/sections/production/detectvulnerabilities.md create mode 100644 translations/spanish/sections/production/frontendout.md create mode 100644 translations/spanish/sections/production/guardprocess.md create mode 100644 translations/spanish/sections/production/lockdependencies.md create mode 100644 translations/spanish/sections/production/measurememory.md create mode 100644 translations/spanish/sections/production/monitoring.md create mode 100644 translations/spanish/sections/production/productoncode.md create mode 100644 translations/spanish/sections/production/setnodeenv.md create mode 100644 translations/spanish/sections/production/smartlogging.md create mode 100644 translations/spanish/sections/production/utilizecpu.md create mode 100644 translations/spanish/sections/projectstructre/breakintcomponents.md create mode 100644 translations/spanish/sections/projectstructre/configguide.md create mode 100644 translations/spanish/sections/projectstructre/createlayers.md create mode 100644 translations/spanish/sections/projectstructre/separateexpress.md create mode 100644 translations/spanish/sections/projectstructre/thincomponents.md create mode 100644 translations/spanish/sections/projectstructre/wraputilities.md create mode 100644 translations/spanish/sections/template.md create mode 100644 translations/spanish/sections/testingandquality/bumpversion.md diff --git a/translations/spanish/README.md b/translations/spanish/README.md new file mode 100644 index 000000000..a0b6db89b --- /dev/null +++ b/translations/spanish/README.md @@ -0,0 +1,674 @@ +[✔]: ../../assets/images/checkbox-small-blue.png + +# Mejores prácticas de NodeJS + +

+ Node.js Best Practices +

+ +
+ +
+50 items Last update: Oct 20, 2017 Updated for Node v.8.4 +
+ +
+ + [![nodepractices](/assets/images/twitter-s.png)](https://twitter.com/nodepractices/) **¡Síguenos en Twitter!** [**@nodepractices**](https://twitter.com/nodepractices/) +
+# ¡Bienvenido! 3 cosas que necesitas saber primero:
+**1. Cuando lees aquí, lees docenas de los mejores artículos de Node.JS -** este es un resumen y una curación del contenido mejor clasificado sobre las mejores prácticas de NodeJS
+
+**2. Es la compilación más grande y crece cada semana -** actualmente, se presentan más de 50 prácticas, guías de estilo y consejos arquitectónicos. Damos la bienvenida a issues y pull requests para mantener este libro actualizado. Nos encantaría verte contribuir aquí, ya sea corrigiendo algunos errores de código o sugiriendo nuevas ideas brillantes: sé parte del libro de mejores prácticas de Node.JS
+
+**3. La mayoría de los puntos tienen información adicional -** Encontrarás cerca de los puntos de mejores prácticas el enlace **🔗Leer más**, que te dará algunos ejemplos de código, citas de blogs seleccionados y más información


+ +## Tabla de contenidos +1. [Prácticas para estructura del proyecto (5)](#1-project-structure-practices) +2. [Prácticas en manejo de errores (11) ](#2-error-handling-practices) +3. [Prácticas de estilo de código (12) ](#3-code-style-practices) +4. [Practicas de prueba y calidad en general (8) ](#4-testing-and-overall-quality-practices) +5. [Prácticas de puesta en producción (16) ](#5-going-to-production-practices) +6. Prácticas de Seguridad (próximamente) +7. Prácticas de Rendimiento (próximamente) + +


+# `1. Prácticas de estructura del proyecto`
+
+## ![✔] 1.1 Estructura tu solución en componentes
+
+**TL;DR:** El peor inconveniente de las grandes aplicaciones es mantener una gran base de código con cientos de dependencias, un monolito que ralentiza a los desarrolladores que intentan incorporar nuevas características. En cambio, particiona tu código en componentes, cada uno con su propia carpeta o una base de código dedicada, y asegúrate de que cada unidad se mantenga pequeña y simple. Visita 'Leer más' a continuación para ver ejemplos de una estructura de proyecto correcta
+
+**De lo contrario:** Cuando los desarrolladores codifican nuevas características, luchan por entender el impacto de su cambio y temen romper otros componentes dependientes; los despliegues se vuelven más lentos y más riesgosos. También se considera más difícil escalar cuando las unidades de negocio no están separadas
+
+🔗 [**Leer más: estructura en componentes**](/sections/projectstructre/breakintcomponents.md)

+## ![✔] 1.2 Aplicar capas a los componentes, mantén Express dentro de sus límites
+
+**TL;DR:** Cada componente debería contener 'capas': un objeto dedicado para la web, otro para la lógica y otro para el código de acceso a datos. Esto no solo genera una clara separación de responsabilidades, sino que también facilita significativamente los mocks y las pruebas del sistema. Aunque este es un patrón muy común, los desarrolladores de APIs tienden a mezclar capas pasando los objetos de la capa web (Express req, res) a la lógica de negocio y a las capas de datos; esto hace que tu aplicación dependa de Express y solo sea accesible a través de él.
+
+**De lo contrario:** Una aplicación que mezcla objetos web con otras capas no puede ser accedida por código de pruebas, CRON jobs y otras llamadas que no son de Express.
+
+🔗 [**Leer más: Aplicar capas a tu aplicación**](/sections/projectstructre/createlayers.md)
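+
+Un esbozo mínimo de la idea (los módulos `userController`, `userService` y `userRepository` son nombres hipotéticos, no parte de ninguna librería): la capa web extrae datos planos de `req` y delega; la lógica de negocio no conoce Express.
+
+```javascript
+// controllers/userController.js – capa web (Express)
+const userService = require('../services/userService');
+
+module.exports.createUser = async (req, res, next) => {
+  try {
+    // solo datos planos cruzan la frontera, nunca req/res
+    const newUser = await userService.createUser(req.body);
+    res.status(201).json(newUser);
+  } catch (error) {
+    next(error);
+  }
+};
+
+// services/userService.js – lógica de negocio, sin rastro de Express
+const userRepository = require('../data/userRepository');
+
+module.exports.createUser = (userDetails) => {
+  // reglas de negocio aquí, fáciles de probar sin HTTP
+  return userRepository.save(userDetails);
+};
+```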

+## ![✔] 1.3 Envuelve las utilidades comunes como paquetes de NPM
+
+**TL;DR:** En una aplicación grande que se compone de múltiples bases de código, las utilidades transversales como los loggers, el cifrado y similares deben estar envueltas en su propio código y expuestas como paquetes privados de NPM. Esto permite compartirlas entre múltiples bases de código y proyectos.
+
+**De lo contrario:** Tendrás que inventar tu propia rueda de despliegue y de dependencias
+
+🔗 [**Leer más: Envolver las utilidades comunes como paquetes de NPM**](/sections/projectstructre/wraputilities.md)

+## ![✔] 1.4 Separar 'servidor' y 'aplicación' de Express
+
+**TL;DR:** Evita el desagradable hábito de definir toda la aplicación [Express](https://expressjs.com/) en un único archivo enorme; separa tu definición de 'Express' en al menos dos archivos: la declaración de la API (app.js) y las características de red (WWW). Para una estructura aún mejor, ubica la declaración de tu API dentro de los componentes.
+
+**De lo contrario:** Se podrá acceder a tu API para realizar pruebas solo a través de llamadas HTTP (más lento y mucho más difícil de usar para generar informes de cobertura). Probablemente tampoco sea un gran placer mantener cientos de líneas de código en un solo archivo
+
+🔗 [**Leer más: separar 'servidor' y 'aplicación' de express**](/sections/projectstructre/separateexpress.md)
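+
+Como referencia, un esbozo mínimo de la separación (la ruta `/salud` y el puerto son supuestos ilustrativos): `app.js` solo declara la API y la exporta, mientras que el arranque de red vive en otro archivo, de modo que las pruebas puedan importar la app sin abrir un puerto.
+
+```javascript
+// app.js – declaración de la API, sin levantar el servidor
+const express = require('express');
+const app = express();
+
+app.get('/salud', (req, res) => res.json({ estado: 'ok' }));
+
+module.exports = app;
+
+// bin/www.js – características de red
+const app = require('../app');
+const port = process.env.PORT || 3000;
+
+app.listen(port, () => console.log(`Escuchando en el puerto ${port}`));
+```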

+## ![✔] 1.5 Usar una configuración segura, jerárquica y consciente del entorno
+
+**TL;DR:** La configuración perfecta e impecable debe asegurar que (a) las claves se pueden leer desde el archivo Y desde la variable de entorno, (b) los secretos se guardan fuera del código al que se ha hecho commit y (c) la configuración es jerárquica para facilitar la localización. Hay unos pocos paquetes que pueden ayudar a cubrir la mayoría de estos casos, como [nconf](https://www.npmjs.com/package/nconf) y [config](https://www.npmjs.com/package/config)
+
+**De lo contrario:** No cumplir con alguno de los requisitos de configuración simplemente frena al equipo de desarrollo o al equipo de DevOps. Probablemente a ambos
+
+🔗 [**Leer más: buenas prácticas de configuración**](/sections/projectstructre/configguide.md)
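+
+Un esbozo mínimo con [nconf](https://www.npmjs.com/package/nconf) (el archivo `config/desarrollo.json` y la clave `db:host` son supuestos ilustrativos): los argumentos y las variables de entorno tienen prioridad sobre el archivo por entorno, y los secretos llegan solo por variables de entorno.
+
+```javascript
+// config.js – jerárquico: argv > variables de entorno > archivo por entorno
+const nconf = require('nconf');
+
+const entorno = process.env.NODE_ENV || 'desarrollo';
+nconf.argv().env().file({ file: `config/${entorno}.json` });
+
+// lectura de una clave anidada, p. ej. { "db": { "host": "localhost" } }
+module.exports.dbHost = nconf.get('db:host');
+```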


+ +

⬆ Return to top

+# `2. Error Handling Practices`
+
+## ![✔] 2.1 Use Async-Await or promises for async error handling
+
+**TL;DR:** Handling async errors in callback style is probably the fastest way to hell (a.k.a the pyramid of doom). The best gift you can give to your code is using instead a reputable promise library or async-await, which enables a much more compact and familiar code syntax like try-catch
+
+**Otherwise:** Node.JS callback style, function(err, response), is a sure path to un-maintainable code due to the mix of error handling with casual code, excessive nesting and awkward coding patterns
+
+🔗 [**Read More: avoiding callbacks**](/sections/errorhandling/asyncerrorhandling.md)
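+
+A minimal sketch of the difference (the `dal` data-access module and `logger` are hypothetical placeholders; in the second variant the data-access functions are assumed to return promises):
+
+```javascript
+// callback style – error handling is repeated at every level of nesting
+function getUserOrders(userId, callback) {
+  dal.getUser(userId, (err, user) => {
+    if (err) { return callback(err); }
+    dal.getOrders(user.id, (err, orders) => {
+      if (err) { return callback(err); }
+      callback(null, { user, orders });
+    });
+  });
+}
+
+// async-await – a single, familiar try-catch covers the whole flow
+async function getUserOrdersAsync(userId) {
+  try {
+    const user = await dal.getUser(userId);
+    const orders = await dal.getOrders(user.id);
+    return { user, orders };
+  } catch (error) {
+    logger.error(error);
+    throw error;
+  }
+}
+```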

+## ![✔] 2.2 Use only the built-in Error object
+
+**TL;DR:** Many throw errors as a string or as some custom type – this complicates the error handling logic and the interoperability between modules. Whether you reject a promise, throw an exception or emit an error – using only the built-in Error object will increase uniformity and prevent loss of information
+
+**Otherwise:** When invoking some component, being uncertain which type of error comes in return – makes it much harder to handle errors properly. Even worse, using custom types to describe errors might lead to loss of critical error information like the stack trace!
+
+🔗 [**Read More: using the built-in error object**](/sections/errorhandling/useonlythebuiltinerror.md)
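+
+A short sketch of the contrast (the `addProduct` functions are illustrative only):
+
+```javascript
+function addProduct(productToAdd) {
+  // Avoid – a thrown string carries no stack trace and breaks `instanceof Error` checks
+  if (!productToAdd) {
+    throw ('How can I add a new product when no value was provided?');
+  }
+}
+
+function addProductSafely(productToAdd) {
+  // Do – throw (or reject a promise with) the built-in Error object
+  if (!productToAdd) {
+    throw new Error('How can I add a new product when no value was provided?');
+  }
+}
+```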

+## ![✔] 2.3 Distinguish operational vs programmer errors
+
+**TL;DR:** Operational errors (e.g. API received an invalid input) refer to known cases where the error impact is fully understood and can be handled thoughtfully. On the other hand, programmer errors (e.g. trying to read an undefined variable) refer to unknown code failures that dictate gracefully restarting the application
+
+**Otherwise:** You may always restart the application when an error appears, but why let ~5000 online users down because of a minor, predicted, operational error? The opposite is also not ideal – keeping the application up when an unknown issue (programmer error) occurred might lead to unpredicted behavior. Differentiating the two allows acting tactfully and applying a balanced approach based on the given context
+
+ 🔗 [**Read More: operational vs programmer error**](/sections/errorhandling/operationalvsprogrammererror.md)
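+
+One common sketch (the `AppError` class below is an assumption for illustration, not a library API): mark known failures explicitly so a centralized handler can decide whether the process should stay alive.
+
+```javascript
+// an error that carries a flag describing whether its impact is fully understood
+class AppError extends Error {
+  constructor(message, isOperational) {
+    super(message);
+    this.name = 'AppError';
+    this.isOperational = isOperational;
+  }
+}
+
+function addProduct(productId) {
+  if (!productId) {
+    // operational error – invalid input, handle it and keep the process running
+    throw new AppError('Product id was not provided', true);
+  }
+  // a programmer error (e.g. reading a property of undefined) surfaces as a plain
+  // TypeError with no isOperational flag – restart gracefully instead
+}
+```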

+ +## ![✔] 2.4 Handle errors centrally, not within an Express middleware + +**TL;DR:** Error handling logic such as mail to admin and logging should be encapsulated in a dedicated and centralized object that all end-points (e.g. Express middleware, cron jobs, unit-testing) call when an error comes in. + +**Otherwise:** Not handling errors within a single place will lead to code duplication and probably to errors that are handled improperly + +🔗 [**Read More: handling errors in a centralized place**](/sections/errorhandling/centralizedhandling.md) + +
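+
+A minimal sketch of the idea (the `errorHandler` module, its `handleError` function and the `logger` are hypothetical names): every entry point delegates to one object that owns the error handling decisions.
+
+```javascript
+// errorHandler.js – the single, centralized place that deals with errors
+const logger = require('./logger'); // assumed mature logger (e.g. Winston)
+
+module.exports.handleError = (error) => {
+  logger.error(error);
+  // send mail to admin, fire metrics, decide whether to crash, etc.
+  return error.isOperational === true;
+};
+
+// somewhere in the Express setup – the middleware only delegates
+const { handleError } = require('./errorHandler');
+
+app.use((err, req, res, next) => { // 'app' is the Express application
+  const isOperational = handleError(err);
+  res.status(500).send('Internal error');
+  if (!isOperational) { process.exit(1); }
+});
+```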

+ +## ![✔] 2.5 Document API errors using Swagger + +**TL;DR:** Let your API callers know which errors might come in return so they can handle these thoughtfully without crashing. This is usually done with REST API documentation frameworks like Swagger + +**Otherwise:** An API client might decide to crash and restart only because he received back an error he couldn’t understand. Note: the caller of your API might be you (very typical in a microservices environment) + + +🔗 [**Read More: documenting errors in Swagger**](/sections/errorhandling/documentingusingswagger.md) + +

+ +## ![✔] 2.6 Shut the process gracefully when a stranger comes to town + +**TL;DR:** When an unknown error occurs (a developer error, see best practice number #3)- there is uncertainty about the application healthiness. A common practice suggests restarting the process carefully using a ‘restarter’ tool like Forever and PM2 + +**Otherwise:** When an unfamiliar exception is caught, some object might be in a faulty state (e.g an event emitter which is used globally and not firing events anymore due to some internal failure) and all future requests might fail or behave crazily + +🔗 [**Read More: shutting the process**](/sections/errorhandling/shuttingtheprocess.md) + +
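+
+A simplified sketch (reusing the hypothetical `errorHandler` from 2.4): once an unknown error is observed, log what you can and let the restarter bring up a fresh, healthy process.
+
+```javascript
+const { handleError } = require('./errorHandler'); // assumed centralized handler
+
+process.on('uncaughtException', (error) => {
+  const isOperational = handleError(error);
+  if (!isOperational) {
+    // Forever / PM2 / Docker / systemd will restart the process in a clean state
+    process.exit(1);
+  }
+});
+```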

+ + + +## ![✔] 2.7 Use a mature logger to increase errors visibility + +**TL;DR:** A set of mature logging tools like Winston, Bunyan or Log4J, will speed-up error discovery and understanding. So forget about console.log. + +**Otherwise:** Skimming through console.logs or manually through messy text file without querying tools or a decent log viewer might keep you busy at work until late + +🔗 [**Read More: using a mature logger**](/sections/errorhandling/usematurelogger.md) + + +

+## ![✔] 2.8 Test error flows using your favorite test framework
+
+**TL;DR:** Whether professional automated QA or plain manual developer testing – ensure that your code not only satisfies the positive scenarios but also handles and returns the right errors. Testing frameworks like Mocha & Chai can handle this easily (see code examples within the "Gist popup")
+
+**Otherwise:** Without testing, whether automatically or manually, you can’t rely on your code to return the right errors. Without meaningful errors – there’s no error handling
+
+
+🔗 [**Read More: testing error flows**](/sections/errorhandling/testingerrorflows.md)

+ +## ![✔] 2.9 Discover errors and downtime using APM products + +**TL;DR:** Monitoring and performance products (a.k.a APM) proactively gauge your codebase or API so they can auto-magically highlight errors, crashes and slow parts that you were missing + +**Otherwise:** You might spend great effort on measuring API performance and downtimes, probably you’ll never be aware which are your slowest code parts under real world scenario and how these affects the UX + + +🔗 [**Read More: using APM products**](/sections/errorhandling/apmproducts.md) + +

+## ![✔] 2.10 Catch unhandled promise rejections
+
+**TL;DR:** Any exception thrown within a promise will get swallowed and discarded unless a developer remembered to explicitly handle it. Even if your code is subscribed to process.uncaughtException! Overcome this by registering to the event process.unhandledRejection
+
+**Otherwise:** Your errors will get swallowed and leave no trace. Nothing to worry about
+
+
+🔗 [**Read More: catching unhandled promise rejection**](/sections/errorhandling/catchunhandledpromiserejection.md)
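+
+A minimal sketch (the `errorHandler` module is the same hypothetical, centralized handler mentioned in 2.4):
+
+```javascript
+const { handleError } = require('./errorHandler');
+
+// without this subscription, a rejected promise with no .catch() leaves no trace
+process.on('unhandledRejection', (reason, promise) => {
+  handleError(reason);
+});
+
+// and keep catching synchronous/unexpected throws as well
+process.on('uncaughtException', (error) => {
+  handleError(error);
+});
+```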

+## ![✔] 2.11 Fail fast, validate arguments using a dedicated library
+
+**TL;DR:** This should be part of your Express best practices – assert API input to avoid nasty bugs that are much harder to track later. Validation code is usually tedious unless you use a very cool helper library like Joi
+
+**Otherwise:** Consider this – your function expects a numeric argument “Discount” which the caller forgets to pass, later on your code checks if Discount!=0 (amount of allowed discount is greater than zero), then it will allow the user to enjoy a discount. OMG, what a nasty bug. Can you see it?
+
+🔗 [**Read More: failing fast**](/sections/errorhandling/failfast.md)
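+
+A sketch with [Joi](https://www.npmjs.com/package/joi) (the member schema is an illustrative assumption; the API shown matches the Joi versions current at the time of writing):
+
+```javascript
+const Joi = require('joi');
+
+const memberSchema = Joi.object().keys({
+  password: Joi.string().regex(/^[a-zA-Z0-9]{6,30}$/),
+  birthyear: Joi.number().integer().min(1900).max(2017),
+  discount: Joi.number().min(0).required() // an absent discount now fails fast
+});
+
+function addNewMember(newMember) {
+  // assert the input before any business logic runs
+  const { error } = Joi.validate(newMember, memberSchema);
+  if (error) {
+    throw new Error(`Invalid member input: ${error.message}`);
+  }
+  // safe to continue – 'discount' is guaranteed to be a number here
+}
+```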


+ +

⬆ Return to top

+ +# `3. Code Style Practices` + +## ![✔] 3.1 Use ESLint + +**TL;DR:** ESLint is the de-facto standard for checking code style, not only to identify nitty-gritty spacing issues but also to detect serious code anti-patterns like developers throwing errors without classification. Using ESLint and following the rest of the code style practices below means following the same styles used by the rest of the community, as well as the same code styles used in the core products themselves. + +**Otherwise:** developers will focus on tedious spacing and line-width concerns + +

+ +## ![✔] 3.2 Node JS Specific Plugins + +**TL;DR:** On top of ESLint standard rules that cover vanilla JS only, add Node-specific plugins like [eslint-plugin-node](https://www.npmjs.com/package/eslint-plugin-node), [eslint-plugin-mocha](https://www.npmjs.com/package/eslint-plugin-mocha) and [eslint-plugin-node-security](https://www.npmjs.com/package/eslint-plugin-security) + +**Otherwise:** Many faulty Node.JS code patterns might escape under the radar. For example, developers might require(variableAsPath) files with a variable given as path which allows attackers to execute any JS script. Node.JS linters can detect such patterns and complain early + +

+ +## ![✔] 3.3 Start a Codeblock's Curly Braces in the Same Line + +**TL;DR:** The opening curly braces of a code block should be in the same line of the opening statement. + +### Code Example +```javascript + // Do + function someFunction() { + // code block + } + + //Avoid + function someFunction + { + // code block + } +``` + +**Otherwise:** Deferring from this best practice might lead to unexpected results, as can be seen in the Stackoverflow thread below: + +🔗 [**Read more:** "Why does a results vary based on curly brace placement?" (Stackoverflow)](https://stackoverflow.com/questions/3641519/why-does-a-results-vary-based-on-curly-brace-placement) + +

+## ![✔] 3.4 Don't Forget the Semicolon
+
+**TL;DR:** While not unanimously agreed upon, it is still recommended to put a semicolon at the end of each statement. This will make your code more readable and explicit to other developers who read it.
+
+**Otherwise:** As seen in the previous section, JavaScript's interpreter automatically adds a semicolon at the end of a statement if there isn't one, which can lead to some undesired results.
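+
+A classic sketch of what automatic semicolon insertion can do (the function bodies are placeholders):
+
+```javascript
+// Avoid – no semicolon after the assignment, so the array literal on the next line
+// is parsed as a property access on the function object; the result is a TypeError
+const magic = function () { /* ... */ }
+[1, 2].forEach((n) => console.log(n))
+
+// Do – explicit semicolons keep the two statements separate
+const magicSafe = function () { /* ... */ };
+[1, 2].forEach((n) => console.log(n));
+```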

+## ![✔] 3.5 Name Your Functions
+
+**TL;DR:** Name all functions, including closures and callbacks. Avoid anonymous functions. This is especially useful when profiling a node app. Naming all functions will allow you to easily understand what you're looking at when checking a memory snapshot.
+
+**Otherwise:** Debugging production issues using a core dump (memory snapshot) might become challenging as you notice significant memory consumption from functions with no name.
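+
+A small sketch of the difference (the handler and helper names are illustrative):
+
+```javascript
+// Avoid – shows up as '(anonymous)' in stack traces and memory snapshots
+setTimeout(() => {
+  cleanExpiredSessions(); // assumed helper
+}, 60000);
+
+// Do – a named function expression keeps its name in profiler output
+setTimeout(function cleanExpiredSessionsJob() {
+  cleanExpiredSessions();
+}, 60000);
+```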

+## ![✔] 3.6 Naming conventions for variables, constants, functions and classes
+
+**TL;DR:** Use ***lowerCamelCase*** when naming variables and functions, ***UpperCamelCase*** (capital first letter as well) when naming classes and ***UPPERCASE*** for constants. This will help you to easily distinguish between plain variables / functions, and classes that require instantiation. Use descriptive names, but try to keep them short.
+
+**Otherwise:** JavaScript is the only language in the world which allows invoking a constructor ("Class") directly without instantiating it first. Consequently, classes and function-constructors are differentiated by starting with UpperCamelCase.
+
+### Code Example
+```javascript
+  // for class names we use UpperCamelCase
+  class SomeClassExample {}
+
+  // for const names we use UPPERCASE
+  const CONFIG = {
+    key: 'value'
+  };
+
+  // for variable and function names we use lowerCamelCase
+  let someVariableExample = 'value';
+  function doSomething() {}
+```

+## ![✔] 3.7 Prefer const over let. Ditch the var
+
+**TL;DR:** Using `const` means that once a variable is assigned, it cannot be reassigned. Preferring const will help you to not be tempted to use the same variable for different uses, and make your code clearer. If a variable needs to be reassigned, in a for loop for example, use `let` to declare it. Another important aspect of let is that a variable declared using let is only available in the block scope in which it was defined. `var` is function scoped, not block scoped, and [shouldn't be used in ES6](https://hackernoon.com/why-you-shouldnt-use-var-anymore-f109a58b9b70) now that you have const and let at your disposal.
+
+**Otherwise:** Debugging becomes way more cumbersome when following a variable that frequently changes.
+
+🔗 [**Read more: JavaScript ES6+: var, let, or const?**](https://medium.com/javascript-scene/javascript-es6-var-let-or-const-ba58b8dcde75)

+## ![✔] 3.8 Requires come first, and not inside functions.
+
+**TL;DR:** Require modules at the beginning of each file, before and outside of any functions. This simple best practice will not only help you easily and quickly tell the dependencies of a file right at the top, but also avoids a couple of potential problems.
+
+**Otherwise:** Requires are run synchronously by Node JS. If they are called from within a function, it may block other requests from being handled at a more critical time. Also, if a required module or any of its own dependencies throws an error and crashes the server, it is best to find out about it as soon as possible, which might not be the case if that module is required from within a function.

+## ![✔] 3.9 Do Require on folders, not directly on files
+
+**TL;DR:** When developing a module/library in a folder, place an index.js file that exposes the module's
+internals so every consumer will pass through it. This will serve as an 'interface' to your module and ease
+future changes without breaking the contract.
+
+**Otherwise:** Changes to the internal structure of files or to their signatures may break the interface with
+clients.
+
+### Code example
+```javascript
+  // Do
+  module.exports.SMSProvider = require('./SMSProvider');
+  module.exports.SMSNumberResolver = require('./SMSNumberResolver');
+
+  // Avoid
+  module.exports.SMSProvider = require('./SMSProvider/SMSProvider.js');
+  module.exports.SMSNumberResolver = require('./SMSNumberResolver/SMSNumberResolver.js');
+```

+## ![✔] 3.10 Use the `===` operator
+
+**TL;DR:** Prefer the strict equality operator `===` over the weaker abstract equality operator `==`. `==` will compare two variables after converting them to a common type. There is no type conversion in `===`, and both variables must be of the same type to be equal.
+
+**Otherwise:** Unequal variables might return true when compared with the `==` operator.
+
+### Code example
+```javascript
+'' == '0' // false
+0 == '' // true
+0 == '0' // true
+
+false == 'false' // false
+false == '0' // true
+
+false == undefined // false
+false == null // false
+null == undefined // true
+
+' \t\r\n ' == 0 // true
+```
+All statements above will return false if used with `===`

+## ![✔] 3.11 Use Async Await, avoid callbacks
+
+**TL;DR:** Node 8 LTS now has full support for Async-await. This is a new way of dealing with asynchronous code which supersedes callbacks and promises. Async-await is non-blocking, and it makes asynchronous code look more synchronous. The best gift you can give to your code is using async-await, which provides a much more compact and familiar code syntax like try-catch.
+
+**Otherwise:** Handling async errors in callback style is probably the fastest way to hell - this style forces you to check errors all over, deal with awkward code nesting and makes it difficult to reason about the code flow.
+
+🔗[**Read more:** Guide to async await 1.0](https://github.com/yortus/asyncawait)

+## ![✔] 3.12 Use => Arrow Functions
+
+**TL;DR:** Though it's recommended to use async-await and avoid function parameters, when dealing with older APIs that accept promises or callbacks - arrow functions make the code structure more compact and keep the lexical context of the root function (i.e. 'this').
+
+**Otherwise:** Longer code (in ES5 functions) is more prone to bugs and cumbersome to read.
+
+🔗 [**Read more: It’s Time to Embrace Arrow Functions**](https://medium.com/javascript-scene/familiarity-bias-is-holding-you-back-its-time-to-embrace-arrow-functions-3d37e1a9bb75)


+ +

⬆ Return to top

+# `4. Testing And Overall Quality Practices`
+
+## ![✔] 4.1 At the very least, write API (component) testing
+
+**TL;DR:** Most projects just don't have any automated testing due to short timetables, or the 'testing project' often runs out of control and gets abandoned. For that reason, prioritize and start with API testing, which is the easiest to write and provides more coverage than unit testing (you may even craft API tests without code using tools like [Postman](https://www.getpostman.com/)). Afterwards, should you have more resources and time, continue with advanced test types like unit testing, DB testing, performance testing, etc
+
+**Otherwise:** You may spend long days on writing unit tests to find out that you got only 20% system coverage

+ +## ![✔] 4.2 Detect code issues with ESLint + specific Node plugin rules + +**TL;DR:** ESLint is the de-facto standard for checking code style, not only to identify nitty-gritty spacing issues but also to detect serious code anti-patterns like developers throwing errors without classification. On top of ESLint standard rules that cover vanilla JS only, add Node-specific plugins like [eslint-plugin-node](https://www.npmjs.com/package/eslint-plugin-node), [eslint-plugin-mocha](https://www.npmjs.com/package/eslint-plugin-mocha) and [eslint-plugin-node-security](https://www.npmjs.com/package/eslint-plugin-security) + +**Otherwise:** Many faulty Node.JS code patterns might escape under the radar. For example, developers might require(variableAsPath) files with a variable given as path which allows attackers to execute any JS script. Node.JS linters can detect such patterns and complain early + + +

+## ![✔] 4.3 Carefully choose your CI platform (Jenkins vs Rest of the world)
+
+**TL;DR:** Your continuous integration platform (CICD) will host all the quality tools (e.g test, lint) so it had better come with a vibrant ecosystem of plugins. [Jenkins](https://jenkins.io/) is the default for many projects as it has the biggest community along with a very powerful platform, at the price of a complex setup that demands a steep learning curve. Its rivals, online SaaS like [Travis](https://travis-ci.org/) and [CircleCI](https://circleci.com), are much easier to set up without the burden of managing the whole infrastructure. Eventually, it's a trade-off between robustness and speed - choose your side carefully
+
+**Otherwise:** Choosing some lightweight SaaS vendor might get you blocked once you need some advanced customization. On the other hand, going with Jenkins might burn precious time on infrastructure setup

+## ![✔] 4.4 Constantly inspect for vulnerable dependencies
+
+**TL;DR:** Even the most reputable dependencies such as Express have known vulnerabilities. This can get easily tamed using community and commercial tools such as 🔗 [nsp](https://github.com/nodesecurity/nsp) that can be invoked from your CI on every build
+
+**Otherwise:** Keeping your code clean from vulnerabilities without dedicated tools will require you to constantly follow online publications about new threats. Quite tedious

+## ![✔] 4.5 Tag your tests
+
+**TL;DR:** Different tests must run on different scenarios: quick smoke, IO-less tests should run when a developer saves or commits a file, full end-to-end tests usually run when a new pull request is submitted, etc. This can be achieved by tagging tests with keywords like #cold #api #sanity so you can grep with your testing harness and invoke the desired subset. For example, this is how you would invoke only the sanity test group with [Mocha](https://mochajs.org/): mocha --grep 'sanity'
+
+**Otherwise:** Running all the tests, including tests that perform dozens of DB queries, any time a developer makes a small change can be extremely slow and keep developers away from running tests
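+
+A minimal sketch with Mocha (the test file and service names are illustrative): the tag is simply part of the test title, so `mocha --grep 'sanity'` selects only the quick subset.
+
+```javascript
+// test/orders.test.js
+describe('Order service #api', () => {
+  it('calculates the order total #sanity', () => {
+    // quick, IO-less assertion goes here
+  });
+
+  it('syncs orders with the warehouse #e2e', () => {
+    // heavy, DB-backed scenario – excluded from the quick 'sanity' run
+  });
+});
+```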

+## ![✔] 4.6 Check your test coverage, it helps to identify wrong test patterns
+
+**TL;DR:** Code coverage tools like [Istanbul/NYC](https://github.com/gotwarlost/istanbul) are great for 3 reasons: they come for free (no effort is required to benefit from these reports), they help to identify a decrease in testing coverage, and last but not least they highlight testing mismatches: by looking at colored code coverage reports you may notice, for example, code areas that are never tested like catch clauses (meaning that tests only invoke the happy paths and not how the app behaves on errors). Set it to fail builds if the coverage falls under a certain threshold
+
+**Otherwise:** There won't be any automated metric that tells you when a large portion of your code is not covered by testing

+## ![✔] 4.7 Inspect for outdated packages
+
+**TL;DR:** Use your preferred tool (e.g. 'npm outdated' or [npm-check-updates](https://www.npmjs.com/package/npm-check-updates)) to detect installed packages which are outdated, inject this check into your CI pipeline and even make a build fail in a severe scenario. For example, a severe scenario might be when an installed package lags 5 patch versions behind (e.g. local version is 1.3.1 and repository version is 1.3.8) or it is tagged as deprecated by its author - kill the build and prevent deploying this version
+
+**Otherwise:** Your production will run packages that have been explicitly tagged by their author as risky

+ +## ![✔] 4.8 Use docker-compose for e2e testing + +**TL;DR:** End to end (e2e) testing which includes live data used to be the weakest link of the CI process as it depends on multiple heavy services like DB. Docker-compose turns this problem into a breeze by crafting production-like environment using a simple text file and easy commands. It allows crafting all the dependent services, DB and isolated network for e2e testing. Last but not least, it can keep a stateless environment that is invoked before each test suite and dies right after + + +**Otherwise:** Without docker-compose teams must maintain a testing DB for each testing environment including developers machines, keep all those DBs in sync so test results won't vary across environments + + +


+ +

⬆ Return to top

+# `5. Going To Production Practices`
+
+## ![✔] 5.1. Monitoring!
+
+**TL;DR:** Monitoring is a game of finding out issues before our customers do – obviously this should be assigned unprecedented importance. The market is overwhelmed with offers, thus consider starting by defining the basic metrics you must follow (my suggestions inside), then go over additional fancy features and choose the solution that ticks all the boxes. Click ‘The Gist’ below for an overview of solutions
+
+**Otherwise:** Failure === disappointed customers. Simple.
+
+
+🔗 [**Read More: Monitoring!**](/sections/production/monitoring.md)

+ +## ![✔] 5.2. Increase transparency using smart logging + +**TL;DR:** Logs can be a dumb warehouse of debug statements or the enabler of a beautiful dashboard that tells the story of your app. Plan your logging platform from day 1: how logs are collected, stored and analyzed to ensure that the desired information (e.g. error rate, following an entire transaction through services and servers, etc) can really be extracted + +**Otherwise:** You end-up with a blackbox that is hard to reason about, then you start re-writing all logging statements to add additional information + + +🔗 [**Read More: Increase transparency using smart logging**](/sections/production/smartlogging.md) + +

+ +## ![✔] 5.3. Delegate anything possible (e.g. gzip, SSL) to a reverse proxy + +**TL;DR:** Node is awfully bad at doing CPU intensive tasks like gzipping, SSL termination, etc. Instead, use a ‘real’ middleware services like nginx, HAproxy or cloud vendor services + +**Otherwise:** Your poor single thread will keep busy doing networking tasks instead of dealing with your application core and performance will degrade accordingly + + +🔗 [**Read More: Delegate anything possible (e.g. gzip, SSL) to a reverse proxy**](/sections/production/delegatetoproxy.md) + +

+## ![✔] 5.4. Lock dependencies
+
+**TL;DR:** Your code must be identical across all environments, but amazingly NPM lets dependencies drift across environments by default – when you install packages in various environments it tries to fetch packages’ latest patch version. Overcome this by using NPM config files, .npmrc, that tell each environment to save the exact (not the latest) version of each package. Alternatively, for finer grain control use NPM "shrinkwrap". *Update: as of NPM5, dependencies are locked by default. The new package manager in town, Yarn, also got us covered by default
+
+**Otherwise:** QA will thoroughly test the code and approve a version that will behave differently in production. Even worse, different servers in the same production cluster might run different code
+
+
+🔗 [**Read More: Lock dependencies**](/sections/production/lockdependencies.md)

+## ![✔] 5.5. Guard process uptime using the right tool
+
+**TL;DR:** The process must go on and get restarted upon failures. For simple scenarios, ‘restarter’ tools like PM2 might be enough, but in today's ‘dockerized’ world – cluster management tools should be considered as well
+
+**Otherwise:** Running dozens of instances without a clear strategy and too many tools together (cluster management, docker, PM2) might lead to a devops chaos
+
+
+🔗 [**Read More: Guard process uptime using the right tool**](/sections/production/guardprocess.md)

+## ![✔] 5.6. Utilize all CPU cores
+
+**TL;DR:** At its basic form, a Node app runs on a single CPU core while all others are left idle. It’s your duty to replicate the Node process and utilize all CPUs – for small-medium apps you may use Node Cluster or PM2. For a larger app consider replicating the process using some Docker cluster (e.g. K8S, ECS) or deployment scripts that are based on the Linux init system (e.g. systemd)
+
+**Otherwise:** Your app will likely utilize only 25% of its available resources(!) or even less. Note that a typical server has 4 CPU cores or more, naive deployment of Node.JS utilizes only 1 (even using PaaS services like AWS beanstalk!)
+
+
+🔗 [**Read More: Utilize all CPU cores**](/sections/production/utilizecpu.md)
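+
+A minimal sketch with the built-in cluster module (the `./app` entry point is an assumption; PM2’s `pm2 start app.js -i max` or a container orchestrator achieve the same with less code):
+
+```javascript
+const cluster = require('cluster');
+const os = require('os');
+
+if (cluster.isMaster) {
+  // fork one worker per available CPU core
+  os.cpus().forEach(() => cluster.fork());
+
+  cluster.on('exit', (worker) => {
+    console.log(`Worker ${worker.process.pid} died, forking a new one`);
+    cluster.fork();
+  });
+} else {
+  require('./app'); // each worker runs the HTTP server
+}
+```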

+ +## ![✔] 5.7. Create a ‘maintenance endpoint’ + +**TL;DR:** Expose a set of system-related information, like memory usage and REPL, etc in a secured API. Although it’s highly recommended to rely on standard and battle-tests tools, some valuable information and operations are easier done using code + +**Otherwise:** You’ll find that you’re performing many “diagnostic deploys” – shipping code to production only to extract some information for diagnostic purposes + + +🔗 [**Read More: Create a ‘maintenance endpoint’**](/sections/production/createmaintenanceendpoint.md) + +

+ +## ![✔] 5.8. Discover errors and downtime using APM products + +**TL;DR:** Monitoring and performance products (a.k.a APM) proactively gauge codebase and API so they can auto-magically go beyond traditional monitoring and measure the overall user-experience across services and tiers. For example, some APM products can highlight a transaction that loads too slow on the end-users side while suggesting the root cause + +**Otherwise:** You might spend great effort on measuring API performance and downtimes, probably you’ll never be aware which is your slowest code parts under real world scenario and how these affects the UX + + +🔗 [**Read More: Discover errors and downtime using APM products**](/sections/production/apmproducts.md) + + +

+ + +## ![✔] 5.9. Make your code production-ready + +**TL;DR:** Code with the end in mind, plan for production from day 1. This sounds a bit vague so I’ve compiled inside (click Gist below) few development tips that are closely related to production maintenance + +**Otherwise:** A world champion IT/devops guy won’t save a system that is badly written + + +🔗 [**Read More: Make your code production-ready**](/sections/production/productoncode.md) + +

+## ![✔] 5.10. Measure and guard the memory usage
+
+**TL;DR:** Node.js has a controversial relationship with memory: the v8 engine has soft limits on memory usage (1.4GB) and there are known ways to leak memory in Node’s code – thus watching Node’s process memory is a must. In small apps you may gauge memory periodically using shell commands, but in medium-large apps consider baking your memory watch into a robust monitoring system
+
+**Otherwise:** Your process memory might leak a hundred megabytes a day as happened at Walmart
+
+
+🔗 [**Read More: Measure and guard the memory usage**](/sections/production/measurememory.md)

+## ![✔] 5.11. Get your frontend assets out of Node
+
+**TL;DR:** Serve frontend content using dedicated middleware (nginx, S3, CDN) because Node performance really gets hurt when dealing with many static files due to its single-threaded model
+
+**Otherwise:** Your single Node thread will be kept busy streaming hundreds of html/images/angular/react files instead of allocating all its resources for the task it was born for – serving dynamic content
+
+
+🔗 [**Read More: Get your frontend assets out of Node**](/sections/production/frontendout.md)

+## ![✔] 5.12. Be stateless, kill your Servers almost every day
+
+**TL;DR:** Store any type of data (e.g. user sessions, cache, uploaded files) within external data stores. Consider ‘killing’ your servers periodically or use a ‘serverless’ platform (e.g. AWS Lambda) that explicitly enforces a stateless behavior
+
+**Otherwise:** Failure at a given server will result in application downtime instead of just killing a faulty machine. Moreover, scaling-out elasticity will get more challenging due to the reliance on a specific server
+
+
+🔗 [**Read More: Be stateless, kill your Servers almost every day**](/sections/production/bestateless.md)

+## ![✔] 5.13. Use tools that automatically detect vulnerabilities
+
+**TL;DR:** Even the most reputable dependencies such as Express have known vulnerabilities from time to time that put a system at risk. This can get easily tamed using community and commercial tools that constantly check for vulnerabilities and warn (locally or at GitHub), some can even patch them immediately
+
+**Otherwise:** Keeping your code clean from vulnerabilities without dedicated tools will require you to constantly follow online publications about new threats. Quite tedious
+
+
+🔗 [**Read More: Use tools that automatically detect vulnerabilities**](/sections/production/detectvulnerabilities.md)

+## ![✔] 5.14. Assign ‘TransactionId’ to each log statement
+
+**TL;DR:** Assign the same identifier, transaction-id: {some value}, to each log entry within a single request. Then when inspecting errors in logs, you can easily conclude what happened before and after. Unfortunately, this is not easy to achieve in Node due to its async nature, see code examples inside
+
+**Otherwise:** Looking at a production error log without the context – what happened before – makes it much harder and slower to reason about the issue
+
+
+🔗 [**Read More: Assign ‘TransactionId’ to each log statement**](/sections/production/assigntransactionid.md)
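+
+A deliberately simplified sketch (the Express `app` and `logger` are assumed to exist; in real async flows a library like continuation-local-storage, or async_hooks, is needed so the id survives awaits and callbacks without being passed around by hand):
+
+```javascript
+const crypto = require('crypto');
+
+// attach a per-request id as early as possible
+app.use((req, res, next) => {
+  req.transactionId = crypto.randomBytes(8).toString('hex');
+  next();
+});
+
+app.get('/orders', (req, res) => {
+  // include the same id in every log line emitted while serving this request
+  logger.info('Fetching orders', { transactionId: req.transactionId });
+  res.json([]);
+});
+```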

+## ![✔] 5.15. Set NODE_ENV=production
+
+**TL;DR:** Set the environment variable NODE_ENV to ‘production’ or ‘development’ to flag whether production optimizations should get activated – many NPM packages determine the current environment and optimize their code for production
+
+**Otherwise:** Omitting this simple property might greatly degrade performance. For example, when using Express for server-side rendering, omitting NODE_ENV makes it slower by a factor of three!
+
+
+🔗 [**Read More: Set NODE_ENV=production**](/sections/production/setnodeenv.md)
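+
+A small sketch of the typical pattern (the `app` instance and the verbose dev-only middleware are assumptions for illustration):
+
+```javascript
+const isProduction = process.env.NODE_ENV === 'production';
+
+if (!isProduction) {
+  // verbose, expensive helpers only while developing
+  app.use(require('morgan')('dev')); // assumed request-logging middleware
+}
+
+// libraries do the same internally, e.g. Express caches compiled views
+// only when NODE_ENV === 'production'
+```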

+## ![✔] 5.16. Design automated, atomic and zero-downtime deployments
+
+**TL;DR:** Research shows that teams that perform many deployments lower the probability of severe production issues. Fast and automated deployments that don’t require risky manual steps and service downtime significantly improve the deployment process. You should probably achieve that using Docker combined with CI tools, as they became the industry standard for streamlined deployment
+
+**Otherwise:** Long deployments -> production downtime & human-related errors -> team unconfident about making deployments -> fewer deployments and features


+ +

⬆ Return to top

+# `Security Practices`
+
+## Our contributors are working on this section, would you like to join?


+# `Performance Practices`
+
+## Our contributors are working on this section, would you like to join?


+# Contributors +## `Yoni Goldberg` +Developer & consultant, Backend expert, JavaScript enthusiast, focused on Node.JS. Many of the bullets was first published on his blog post [http://www.goldbergyoni.com](http://www.goldbergyoni.com) + +## `Ido Richter` +👨‍💻 Software engineer, 🌐 web developer, 🤖 emojis enthusiast. diff --git a/translations/spanish/sections/drafts/readme-general-toc-1.md b/translations/spanish/sections/drafts/readme-general-toc-1.md new file mode 100644 index 000000000..6590b0f40 --- /dev/null +++ b/translations/spanish/sections/drafts/readme-general-toc-1.md @@ -0,0 +1,84 @@ + + +

+ Node.js Best Practices +

+ +53 items Last update: 7 days ago Updated for Node v.8.4 + + + +# Welcome to Node.js Best Practices + +Welcome to the biggest compilation of Node.JS best practices. The content below was gathered from all top ranked books and posts and is updated constantly - when you read here rest assure that no significant tip slipped away. Feel at home - we love to discuss via PRs, issues or Gitter. + +## Table of Contents +* [Project Setup Practices (18)](#project-setup-practices) +* [Code Style Practices (11) ](#code-style-practices) +* [Error Handling Practices (14) ](#error-handling-practices) +* [Going To Production Practices (21) ](#going-to-production-practices) +* [Testing Practices (9) ](#deployment-practices) +* [Security Practices (8) ](#security-practices) + + +

+# `Project Setup Practices`
+
+## ✔ 1. Structure your solution by feature ('microservices')
+
+**TL;DR:** The worst large-application pitfall is a huge code base with hundreds of dependencies that slows down the developers as they try to incorporate new features. Partitioning into small units ensures that each unit is kept simple and easy to maintain. This strategy pushes the complexity to the higher level - designing the cross-component interactions.
+
+**Otherwise:** Developing a new feature that changes a few objects demands evaluating how the change might affect dozens of dependants, and each deployment becomes a fear.
+
+🔗 [**Read More: Structure by feature**](/sections/errorhandling/asyncawait.md)

+ +## ✔ 2. Layer your app, keep Express within its boundaries + +**TL&DR:** It's very common to see Express API passes the express objects (req, res) to business logic and data layers, sometimes even to every function - this makes your application depedant on and accessible by Express only. What if your code should be reached by testing console or CRON job? instead create your own context object with cross-cutting-concern properties like the user roles and inject into other layers, or use 'thread-level variables' libraries like continuation local storage + +**Otherwise:** Application can be accessed by Express only and require to create complex testing mocks + +🔗 [**Read More: Structure by feature*](/sections/errorhandling/asyncawait.md) + +

+ +## ✔ 3. Configure ESLint with node-specific plugins + +**TL&DR:** Monitoring is a game of finding out issues before our customers do – obviously this should be assigned unprecedented importance. The market is overwhelmed with offers thus consider starting with defining the basic metrics you must follow (my sug + +**Otherwise:** You end-up with a blackbox that is hard to reason about, then you start re-writing all logging statements to add additional information + +🔗 [**Read More: Structure by feature*](/sections/errorhandling/asyncawait.md) + + +


+# `Code Style Practices` + + +


+# `Error Handling Practices` +

⬆ Return to top

+ +## ✔ Use async-await for async error handling + +* **TL;DR:** Handling async errors in callback style is probably the fastest way to hell (a.k.a the pyramid of doom). The best gift you can give to your code is using instead a reputable promise library or async-await which provides much compact and familiar code syntax like try-catch + +* **Otherwise:** Node.JS callback style, function(err, response), is a promising way to un-maintainable code due to the mix of error handling with casual code, excessive nesting and awkward coding patterns + +🔗 [**Use async-await for async error handling**](/sections/errorhandling/asyncawait.md) + + + +


+# `Going To Production Practices` + + +


+# `Deployment Practices` + + +


+# `Security Practices` + diff --git a/translations/spanish/sections/drafts/readme-general-toc-2.md b/translations/spanish/sections/drafts/readme-general-toc-2.md new file mode 100644 index 000000000..de2701138 --- /dev/null +++ b/translations/spanish/sections/drafts/readme-general-toc-2.md @@ -0,0 +1,78 @@ +# Node.JS Best Practices + +53 items Last update: 7 days ago Updated for Node v.8.4 + +![Node.js Best Practices](assets/images/banner-1.png) + +# Welcome to Node.js Best Practices + +Welcome to the biggest compilation of Node.JS best practices. The content below was gathered from all top ranked books and posts and is updated constantly - when you read here rest assure that no significant tip slipped away. Feel at home - we love to discuss via PRs, issues or Gitter. + +## Table of Contents +* [Project Setup Practices (18)](#project-setup-practices) +* [Code Style Practices (11) ](#code-style-practices) +* [Error Handling Practices (14) ](#error-handling-practices) +* [Going To Production Practices (21) ](#going-to-production-practices) +* [Testing Practices (9) ](#deployment-practices) +* [Security Practices (8) ](#security-practices) + +

+# `Project Setup Practices` + +## ✔ 1. Structure your solution by feature ('microservices') + +**TL&DR:** The worst large applications pitfal is a huge code base where hundreds of dependencies slow down developers as try to incorporate new features. Partioning into small units ensures that each unit is kept simple and very easy to maintain. This strategy pushes the complexity to the higher level - designing the cross-component interactions. + +**Otherwise:** Developing a new feature with a change to few objects demands to evaluate how this changes might affect dozends of dependants and ach deployment becomes a fear. + +🔗 [**Read More: Structure by feature*](/sections/errorhandling/asyncawait.md) + +
+ +## ✔ 2. Layer your app, keep Express within its boundaries + +**TL&DR:** It's very common to see Express API passes the express objects (req, res) to business logic and data layers, sometimes even to every function - this makes your application depedant on and accessible by Express only. What if your code should be reached by testing console or CRON job? instead create your own context object with cross-cutting-concern properties like the user roles and inject into other layers, or use 'thread-level variables' libraries like continuation local storage + +**Otherwise:** Application can be accessed by Express only and require to create complex testing mocks + +🔗 [**Read More: Structure by feature*](/sections/errorhandling/asyncawait.md) + +

+ +## ✔ 3. Configure ESLint with node-specific plugins + +**TL&DR:** Monitoring is a game of finding out issues before our customers do – obviously this should be assigned unprecedented importance. The market is overwhelmed with offers thus consider starting with defining the basic metrics you must follow (my sug + +**Otherwise:** You end-up with a blackbox that is hard to reason about, then you start re-writing all logging statements to add additional information + +🔗 [**Read More: Structure by feature*](/sections/errorhandling/asyncawait.md) + +


+# `Code Style Practices` + + +


+# `Error Handling Practices` +

⬆ Return to top

+ +## ✔ Use async-await for async error handling + +**TL;DR:** Handling async errors in callback style is probably the fastest way to hell (a.k.a the pyramid of doom). The best gift you can give to your code is using instead a reputable promise library or async-await which provides much compact and familiar code syntax like try-catch + +**Otherwise:** Node.JS callback style, function(err, response), is a promising way to un-maintainable code due to the mix of error handling with casual code, excessive nesting and awkward coding patterns + +🔗 [**Use async-await for async error handling**](/sections/errorhandling/asyncawait.md) + + + +


+# `Going To Production Practices` + + +


+# `Deployment Practices` + + +


+# `Security Practices` + diff --git a/translations/spanish/sections/drafts/readme-general-toc-3.md b/translations/spanish/sections/drafts/readme-general-toc-3.md new file mode 100644 index 000000000..d6ec6386a --- /dev/null +++ b/translations/spanish/sections/drafts/readme-general-toc-3.md @@ -0,0 +1,82 @@ + + +

+ Node.js Best Practices +

+ +53 items Last update: 7 days ago Updated for Node v.8.4 + + + +# Welcome to Node.js Best Practices + +Welcome to the biggest compilation of Node.JS best practices, based on our check it's also the largest collection on any programming language (more than 53 items). The content below was gathered from all top ranked books and posts and is updated constantly - if you read here you can rest assure that no significant tip slipped away. Feel at home - we love to discuss via PRs, issues or Gitter. +

+## Table of Contents +* [Project Setup Practices (18)](#project-setup-practices) +* [Code Style Practices (11) ](#code-style-practices) +* [Error Handling Practices (14) ](#error-handling-practices) +* [Going To Production Practices (21) ](#going-to-production-practices) +* [Testing Practices (9) ](#deployment-practices) +* [Security Practices (8) ](#security-practices) + +

+# `Project Setup Practices` + +## ✔ 1. Structure your solution by feature ('microservices') + +**TL&DR:** The worst large applications pitfal is a huge code base where hundreds of dependencies slow down developers as try to incorporate new features. Partioning into small units ensures that each unit is kept simple and very easy to maintain. This strategy pushes the complexity to the higher level - designing the cross-component interactions. + +**Otherwise:** Developing a new feature with a change to few objects demands to evaluate how this changes might affect dozends of dependants and ach deployment becomes a fear. + +

+ +## ✔ 2. Layer your app, keep Express within its boundaries + +**TL&DR:** It's very common to see Express API passes the express objects (req, res) to business logic and data layers, sometimes even to every function - this makes your application depedant on and accessible by Express only. What if your code should be reached by testing console or CRON job? instead create your own context object with cross-cutting-concern properties like the user roles and inject into other layers, or use 'thread-level variables' libraries like continuation local storage + +**Otherwise:** Application can be accessed by Express only and require to create complex testing mocks + +🔗 [**Read More: Structure by feature*](/sections/errorhandling/asyncawait.md) + + +

+ +## ✔ 3. Configure ESLint with node-specific plugins + +**TL&DR:** Monitoring is a game of finding out issues before our customers do – obviously this should be assigned unprecedented importance. The market is overwhelmed with offers thus consider starting with defining the basic metrics you must follow (my sug + +**Otherwise:** You end-up with a blackbox that is hard to reason about, then you start re-writing all logging statements to add additional information + +🔗 [**Read More: Structure by feature*](/sections/errorhandling/asyncawait.md) + + +


+# `Code Style Practices` + + +


+# `Error Handling Practices` +

⬆ Return to top

+ +## ✔ Use async-await for async error handling + +**TL;DR:** Handling async errors in callback style is probably the fastest way to hell (a.k.a the pyramid of doom). The best gift you can give to your code is using instead a reputable promise library or async-await which provides much compact and familiar code syntax like try-catch + +**Otherwise:** Node.JS callback style, function(err, response), is a promising way to un-maintainable code due to the mix of error handling with casual code, excessive nesting and awkward coding patterns + +🔗 [**Use async-await for async error handling**](/sections/errorhandling/asyncawait.md) + + + +


+# `Going To Production Practices` + + +


+# `Deployment Practices` + + +


+# `Security Practices` + diff --git a/translations/spanish/sections/drafts/readme-general-toc-4.md b/translations/spanish/sections/drafts/readme-general-toc-4.md new file mode 100644 index 000000000..987b0084c --- /dev/null +++ b/translations/spanish/sections/drafts/readme-general-toc-4.md @@ -0,0 +1,115 @@ + + +

+ Node.js Best Practices +

+ +53 items Last update: 7 days ago Updated for Node v.8.4 + + + +# Welcome to Node.js Best Practices + +Welcome to the biggest compilation of Node.JS best practices, based on our check it's also the largest collection on any programming language (more than 53 items). The content below was gathered from all top ranked books and posts and is updated constantly - if you read here you can rest assure that no significant tip slipped away. Feel at home - we love to discuss via PRs, issues or Gitter. + +## Table of Contents +* [Project Setup Practices (18)](#project-setup-practices) +* [Code Style Practices (11) ](#code-style-practices) +* [Error Handling Practices (14) ](#error-handling-practices) +* [Going To Production Practices (21) ](#going-to-production-practices) +* [Testing Practices (9) ](#deployment-practices) +* [Security Practices (8) ](#security-practices) + +

+# `Project Setup Practices`
+
+## ![](assets/images/checkbox-sm.png) 1. Structure your solution by feature ('microservices')
+
+**TL;DR:** The worst pitfall of large applications is a huge code base where hundreds of dependencies slow developers down as they try to incorporate new features. Partitioning into small units ensures that each unit is kept simple and easy to maintain. This strategy pushes the complexity to a higher level - designing the cross-component interactions.
+
+**Otherwise:** Developing a new feature that changes a few objects demands evaluating how these changes might affect dozens of dependents, and each deployment becomes a source of fear.
+
+🔗 [**Read More: Structure by feature**](/sections/errorhandling/asyncawait.md)
+

+
+## ![](assets/images/checkbox-sm.png) 2. Layer your app, keep Express within its boundaries
+
+**TL;DR:** It's very common to see an Express API pass the Express objects (req, res) to the business logic and data layers, sometimes even to every function - this makes your application dependent on, and accessible by, Express only. What if your code should be reached from a testing console or a CRON job? Instead, create your own context object with cross-cutting-concern properties like the user roles and inject it into the other layers, or use 'thread-level variables' libraries like continuation local storage
+
+**Otherwise:** The application can be accessed by Express only and requires creating complex testing mocks
+
+🔗 [**Read More: Structure by feature**](/sections/errorhandling/asyncawait.md)
+

+
+## ![](assets/images/checkbox-sm.png) 3. Configure ESLint with node-specific plugins
+
+**TL;DR:** Monitoring is a game of finding out issues before our customers do – obviously this should be assigned unprecedented importance. The market is overwhelmed with offers, thus consider starting by defining the basic metrics you must follow (my sug
+
+**Otherwise:** You end up with a black box that is hard to reason about, then you start re-writing all logging statements to add additional information
+
+🔗 [**Read More: Structure by feature**](/sections/errorhandling/asyncawait.md)
+
+
+

+ +## Additional 15 bullets will appear here + +


+# `Code Style Practices`
+
+## ![](assets/images/checkbox-sm.png) 1. Use async-await
+
+**TL;DR:** Monitoring is a game of finding out issues before our customers do – obviously this should be assigned unprecedented importance. The market is overwhelmed with offers, thus consider starting by defining the basic metrics you must follow (my sug
+
+**Otherwise:** You end up with a black box that is hard to reason about, then you start re-writing all logging statements to add additional information
+
+🔗 [**Read More: Structure by feature**](/sections/errorhandling/asyncawait.md)
+

+
+## ![](assets/images/checkbox-sm.png) 2. Break into small classes or objects
+
+**TL;DR:** Monitoring is a game of finding out issues before our customers do – obviously this should be assigned unprecedented importance. The market is overwhelmed with offers, thus consider starting by defining the basic metrics you must follow (my sug
+
+**Otherwise:** You end up with a black box that is hard to reason about, then you start re-writing all logging statements to add additional information
+
+🔗 [**Read More: Structure by feature**](/sections/errorhandling/asyncawait.md)
+


+# `Error Handling Practices` +

⬆ Return to top

+
+## Use async-await for async error handling
+
+**TL;DR:** Handling async errors in callback style is probably the fastest way to hell (a.k.a. the pyramid of doom). The best gift you can give to your code is to use instead a reputable promise library or async-await, which provides a much more compact and familiar code syntax like try-catch
+
+**Otherwise:** The Node.JS callback style, function(err, response), is a promising way to un-maintainable code due to the mix of error handling with casual code, excessive nesting and awkward coding patterns
+
+🔗 [**Use async-await for async error handling**](/sections/errorhandling/asyncawait.md)
+

+
+## Use async-await for async error handling
+
+**TL;DR:** Handling async errors in callback style is probably the fastest way to hell (a.k.a. the pyramid of doom). The best gift you can give to your code is to use instead a reputable promise library or async-await, which provides a much more compact and familiar code syntax like try-catch
+
+**Otherwise:** The Node.JS callback style, function(err, response), is a promising way to un-maintainable code due to the mix of error handling with casual code, excessive nesting and awkward coding patterns
+
+🔗 [**Use async-await for async error handling**](/sections/errorhandling/asyncawait.md)
+
+
+


+# `Going To Production Practices` + + +


+# `Deployment Practices` + + +


+# `Security Practices`
+
 diff --git a/translations/spanish/sections/errorhandling/apmproducts.md b/translations/spanish/sections/errorhandling/apmproducts.md new file mode 100644 index 000000000..9dc41475a --- /dev/null +++ b/translations/spanish/sections/errorhandling/apmproducts.md @@ -0,0 +1,29 @@
+# Discover errors and downtime using APM products
+
+
+### One Paragraph Explainer
+
+Exception != Error. Traditional error handling assumes the existence of Exceptions, but application errors might come in the form of slow code paths, API downtime, lack of computational resources and more. This is where APM products come in handy, as they allow detecting a wide variety of 'buried' issues proactively with minimal setup. Among the common features of APM products are: alerting when an HTTP API returns errors, detecting when the API response time drops below some threshold, detecting 'code smells', monitoring server resources, an operational intelligence dashboard with IT metrics and many other useful features. Most vendors offer a free plan.
+
+### Wikipedia about APM
+
+In the fields of information technology and systems management, Application Performance Management (APM) is the monitoring and management of performance and availability of software applications. APM strives to detect and diagnose complex application performance problems to maintain an expected level of service. APM is "the translation of IT metrics into business meaning ([i.e.] value)".
+
+### Understanding the APM marketplace
+
+APM products comprise 3 major segments:
+
+1. Website or API monitoring – external services that constantly monitor uptime and performance via HTTP requests. Can be set up in a few minutes. Following are a few selected contenders: Pingdom, Uptime Robot, and New Relic
+
+2. Code instrumentation – a product family which requires embedding an agent within the application to benefit from features like slow code detection, exception statistics, performance monitoring and many more. Following are a few selected contenders: New Relic, App Dynamics
+
+3. Operational intelligence dashboard – this line of products is focused on facilitating the ops team with metrics and curated content that helps to easily stay on top of application performance. This usually involves aggregating multiple sources of information (application logs, DB logs, server logs, etc) and upfront dashboard design work. Following are a few selected contenders: Datadog, Splunk
+
+
+
+### Example: UpTimeRobot.Com – Website monitoring dashboard
+![alt text](https://github.com/i0natan/nodebestpractices/blob/master/assets/images/uptimerobot.jpg "Website monitoring dashboard")
+
+### Example: AppDynamic.Com – end to end monitoring combined with code instrumentation
+![alt text](https://github.com/i0natan/nodebestpractices/blob/master/assets/images/app-dynamics-dashboard.png "end to end monitoring combined with code instrumentation")
\ No newline at end of file
 diff --git a/translations/spanish/sections/errorhandling/asyncerrorhandling.md b/translations/spanish/sections/errorhandling/asyncerrorhandling.md new file mode 100644 index 000000000..f1f638523 --- /dev/null +++ b/translations/spanish/sections/errorhandling/asyncerrorhandling.md @@ -0,0 +1,56 @@
+# Use Async-Await or promises for async error handling
+
+
+### One Paragraph Explainer
+
+Callbacks don't scale well since they are not familiar to most programmers, they force you to check errors all over, deal with nasty code nesting and make it difficult to reason about the code flow.
Promise libraries like BlueBird, async, and Q pack a standard code style using RETURN and THROW to control the program flow. Specifically, they support the favorite try-catch error handling style, which allows freeing the main code path from dealing with errors in every function
+
+
+### Code Example – using promises to catch errors
+
+
+```javascript
+doWork()
+ .then(doWork)
+ .then(doOtherWork)
+ .then((result) => doWork)
+ .catch((error) => { throw error; })
+ .then(verify);
+```
+
+### Anti-pattern code example – callback style error handling
+
+```javascript
+getData(someParameter, function(err, result){
+    if(err !== null) {
+        //do something like calling the given callback function and pass the error
+    }
+    getMoreData(a, function(err, result){
+        if(err !== null) {
+            //do something like calling the given callback function and pass the error
+        }
+        getMoreData(b, function(c){
+            getMoreData(d, function(e){
+                if(err !== null) {
+                    //you get the idea?
+                }
+            });
+        });
+    });
+});
+```
+
+### Blog Quote: "We have a problem with promises"
+ From the blog pouchdb.com
+
+ > ……And in fact, callbacks do something even more sinister: they deprive us of the stack, which is something we usually take for granted in programming languages. Writing code without a stack is a lot like driving a car without a brake pedal: you don't realize how badly you need it, until you reach for it and it's not there. The whole point of promises is to give us back the language fundamentals we lost when we went async: return, throw, and the stack. But you have to know how to use promises correctly in order to take advantage of them.
+
+### Blog Quote: "The promises method is much more compact"
+ From the blog gosquared.com
+
+ > ………The promises method is much more compact, clearer and quicker to write. If an error or exception occurs within any of the ops it is handled by the single .catch() handler. Having this single place to handle all errors means you don't need to write error checking for each stage of the work.
+
+### Blog Quote: "Promises are native ES6, can be used with generators"
+ From the blog StrongLoop
+
+ > ….Callbacks have a lousy error-handling story. Promises are better. Marry the built-in error handling in Express with promises and significantly lower the chances of an uncaught exception. Promises are native ES6, can be used with generators, and ES7 proposals like async/await through compilers like Babel
+
+### Blog Quote: "All those regular flow control constructs you are used to are completely broken"
+From the blog Benno's
+
+ > ……One of the best things about asynchronous, callback based programming is that basically all those regular flow control constructs you are used to are completely broken. However, the one I find most broken is the handling of exceptions. Javascript provides a fairly familiar try…catch construct for dealing with exceptions. The problem with exceptions is that they provide a great way of short-cutting errors up a call stack, but end up being completely useless if the error happens on a different stack…
 diff --git a/translations/spanish/sections/errorhandling/catchunhandledpromiserejection.md b/translations/spanish/sections/errorhandling/catchunhandledpromiserejection.md new file mode 100644 index 000000000..94dc9e7d1 --- /dev/null +++ b/translations/spanish/sections/errorhandling/catchunhandledpromiserejection.md @@ -0,0 +1,58 @@
+# Catch unhandled promise rejections

+
+
+### One Paragraph Explainer
+
+Typically, most modern Node.JS/Express application code runs within promises – whether within the .then handler, a function callback or in a catch block. Surprisingly, unless a developer remembered to add a .catch clause, errors thrown in these places just disappear without being handled anywhere, not even by the uncaughtException handler. Recent versions of Node added a warning message when an unhandled rejection pops up; this might help to notice when things go wrong, but it's obviously not proper error handling. The straightforward solution is to never forget to add a .catch clause within each promise chain call and redirect to a centralized error handler. However, building your error handling strategy only on the developer's discipline is somewhat fragile. Consequently, it's highly recommended to use a graceful fallback and subscribe to process.on('unhandledRejection', callback) – this will ensure that any promise error, if not handled locally, will get its treatment.
+

+ +### Code example: these errors will not get caught by any error handler (except unhandledRejection) + +```javascript +DAL.getUserById(1).then((johnSnow) => +{ + //this error will just vanish + if(johnSnow.isAlive == false) + throw new Error('ahhhh'); +}); + +``` +

+### Code example: Catching unresolved and rejected promises
+
+```javascript
+process.on('unhandledRejection', function (reason, p) {
+  //I just caught an unhandled promise rejection; since we already have a fallback handler for unhandled errors (see below), let's throw and let it handle that
+  throw reason;
+});
+process.on('uncaughtException', function (error) {
+  //I just received an error that was never handled, time to handle it and then decide whether a restart is needed
+  errorManagement.handler.handleError(error);
+  if (!errorManagement.handler.isTrustedError(error))
+    process.exit(1);
+});
+
+```
+

+### Blog Quote: "If you can make a mistake, at some point you will"
+ From the blog James Nelson
+
+ > Let's test your understanding. Which of the following would you expect to print an error to the console?
+Promise.resolve('promised value').then(function() {
+throw new Error('error');
+});
+
+Promise.reject('error value').catch(function() {
+throw new Error('error');
+});
+
+new Promise(function(resolve, reject) {
+throw new Error('error');
+});
+
+I don't know about you, but my answer is that I'd expect all of them to print an error. However, the reality is that a number of modern JavaScript environments won't print errors for any of them. The problem with being human is that if you can make a mistake, at some point you will. Keeping this in mind, it seems obvious that we should design things in such a way that mistakes hurt as little as possible, and that means handling errors by default, not discarding them
+
 diff --git a/translations/spanish/sections/errorhandling/centralizedhandling.md b/translations/spanish/sections/errorhandling/centralizedhandling.md new file mode 100644 index 000000000..d9c5db3d8 --- /dev/null +++ b/translations/spanish/sections/errorhandling/centralizedhandling.md @@ -0,0 +1,83 @@
+# Handle errors centrally, through but not within middleware
+
+
+### One Paragraph Explainer
+
+Without one dedicated object for error handling, the chances are greater that important errors will hide under the radar due to improper handling. The error handler object is responsible for making the error visible, for example by writing to a well-formatted logger, sending events to some monitoring product or emailing the admin directly. A typical error flow might be: Some module throws an error -> the API router catches the error -> it propagates the error to the middleware (e.g. Express, KOA) which is responsible for catching errors -> a centralized error handler is called -> the middleware is told whether this error is an untrusted error (not operational) so it can restart the app gracefully. Note that it's a common, yet wrong, practice to handle errors within Express middleware – doing so will not cover errors that are thrown in non-web interfaces
+
+
+
+### Code Example – a typical error flow
+
+```javascript
+//DAL layer, we don't handle errors here
+DB.addDocument(newCustomer, (error, result) => {
+    if (error)
+        throw new Error("Great error explanation comes here"); //attach other useful properties to the error object here
+});
+
+//API route code, we catch both sync and async errors and forward to the middleware
+try {
+    customerService.addNew(req.body).then(function (result) {
+        res.status(200).json(result);
+    }).catch((error) => {
+        next(error)
+    });
+}
+catch (error) {
+    next(error);
+}
+
+//Error handling middleware, we delegate the handling to the centralized error handler
+app.use(function (err, req, res, next) {
+    errorHandler.handleError(err).then((isOperationalError) => {
+        if (!isOperationalError)
+            next(err);
+    });
+});
+
+```
+
+### Code example – handling errors within a dedicated object
+
+```javascript
+module.exports.handler = new errorHandler();
+
+function errorHandler(){
+    this.handleError = function (error) {
+        return logger.logError(error).then(sendMailToAdminIfCritical).then(saveInOpsQueueIfCritical).then(determineIfOperationalError);
+    }
+}
+
+```
+
+### Code Example – Anti Pattern: handling errors within the middleware
+
+```javascript
+//middleware handling the error directly, who will handle Cron jobs and testing errors?
+app.use(function (err, req, res, next) {
+    logger.logError(err);
+    if(err.severity == errors.high)
+        mailer.sendMail(configuration.adminMail, "Critical error occurred", err);
+    if(!err.isOperational)
+        next(err);
+});
+
+```
+
+### Blog Quote: "Sometimes lower levels can't do anything useful except propagate the error to their caller"
+ From the blog Joyent, ranked 1 for the keywords "Node.JS error handling"
+
+ > …You may end up handling the same error at several levels of the stack. This happens when lower levels can't do anything useful except propagate the error to their caller, which propagates the error to its caller, and so on. Often, only the top-level caller knows what the appropriate response is, whether that's to retry the operation, report an error to the user, or something else. But that doesn't mean you should try to report all errors to a single top-level callback, because that callback itself can't know in what context the error occurred…
+
+
+### Blog Quote: "Handling each err individually would result in tremendous duplication"
+ From the blog JS Recipes, ranked 17 for the keywords "Node.JS error handling"
+
+ > ……In the Hackathon Starter api.js controller alone, there are over 79 occurrences of error objects. Handling each err individually would result in a tremendous amount of code duplication. The next best thing you can do is to delegate all error handling logic to an Express middleware…
+
+
+### Blog Quote: "HTTP errors have no place in your database code"
+ From the blog Daily JS, ranked 14 for the keywords "Node.JS error handling"
+
+ > ……You should set useful properties in error objects, but use such properties consistently. And, don't cross the streams: HTTP errors have no place in your database code. Or for browser developers, Ajax errors have a place in code that talks to the server, but not code that processes Mustache templates…
+
 diff --git a/translations/spanish/sections/errorhandling/documentingusingswagger.md b/translations/spanish/sections/errorhandling/documentingusingswagger.md new file mode 100644 index 000000000..6a662aa6f --- /dev/null +++ b/translations/spanish/sections/errorhandling/documentingusingswagger.md @@ -0,0 +1,15 @@
+# Document API errors using Swagger
+
+
+### One Paragraph Explainer
+
+REST APIs return results using HTTP status codes; it's absolutely required for the API user to be aware not only of the API schema but also of the potential errors – the caller may then catch an error and tactfully handle it. For example, your API documentation might state in advance that HTTP status 409 is returned when the customer name already exists (assuming the API registers new users), so the caller can correspondingly render the best UX for the given situation. Swagger is a standard that defines the schema of API documentation, with an ecosystem of tools that allow creating documentation easily online, see the print screens below
+
+### Blog Quote: "You have to tell your callers what errors can happen"
+From the blog Joyent, ranked 1 for the keywords "Node.JS logging"
+
+ > We've talked about how to handle errors, but when you're writing a new function, how do you deliver errors to the code that called your function? …If you don't know what errors can happen or don't know what they mean, then your program cannot be correct except by accident.
So if you're writing a new function, you have to tell your callers what errors can happen and what they mean…
+
+
+### Useful Tool: Swagger Online Documentation Creator
+![alt text](https://github.com/i0natan/nodebestpractices/blob/master/assets/images/swaggerDoc.png "API error handling")
\ No newline at end of file
 diff --git a/translations/spanish/sections/errorhandling/failfast.md b/translations/spanish/sections/errorhandling/failfast.md new file mode 100644 index 000000000..07c24e64f --- /dev/null +++ b/translations/spanish/sections/errorhandling/failfast.md @@ -0,0 +1,50 @@
+# Fail fast, validate arguments using a dedicated library
+
+
+### One Paragraph Explainer
+
+We all know how checking arguments and failing fast is important to avoid hidden bugs (see the anti-pattern code example below). If not, read about explicit programming and defensive programming. In reality, we tend to avoid it due to the annoyance of coding it (e.g. think of validating a hierarchical JSON object with fields like email and dates) – libraries like Joi and Validator turn this tedious task into a breeze.
+
+### Wikipedia: Defensive Programming
+
+Defensive programming is an approach to improve software and source code, in terms of: General quality – reducing the number of software bugs and problems. Making the source code comprehensible – the source code should be readable and understandable so it is approved in a code audit. Making the software behave in a predictable manner despite unexpected inputs or user actions.
+
+
+
+### Code example: validating complex JSON input using ‘Joi’
+
+```javascript
+var Joi = require('joi');
+
+var memberSchema = Joi.object().keys({
+    password: Joi.string().regex(/^[a-zA-Z0-9]{3,30}$/),
+    birthyear: Joi.number().integer().min(1900).max(2013),
+    email: Joi.string().email()
+});
+
+function addNewMember(newMember)
+{
+    //assertions come first
+    Joi.validate(newMember, memberSchema, (err, value) => {
+        if (err)
+            throw new Error("Invalid input");
+    });
+    //other logic here
+}
+
+```
+
+### Anti-pattern: no validation yields nasty bugs
+
+```javascript
+//if the discount is positive let's then redirect the user to print his discount coupons
+function redirectToPrintDiscount(httpResponse, member, discount)
+{
+    if(discount != 0)
+        httpResponse.redirect(`/discountPrintView/${member.id}`);
+}
+
+redirectToPrintDiscount(httpResponse, someMember);
+//forgot to pass the parameter discount, why the heck was the user redirected to the discount screen?
+
+```
+
+### Blog Quote: "You should throw these errors immediately"
+ From the blog: Joyent
+
+ > A degenerate case is where someone calls an asynchronous function but doesn't pass a callback. You should throw these errors immediately, since the program is broken and the best chance of debugging it involves getting at least a stack trace and ideally a core file at the point of the error. To do this, we recommend validating the types of all arguments at the start of the function.
\ No newline at end of file
 diff --git a/translations/spanish/sections/errorhandling/monitoring.md b/translations/spanish/sections/errorhandling/monitoring.md new file mode 100644 index 000000000..5f9f983ad --- /dev/null +++ b/translations/spanish/sections/errorhandling/monitoring.md @@ -0,0 +1,18 @@
+# Title
+
+
+### One Paragraph Explainer
+
+> At the very basic level, monitoring means you can *easily* identify when bad things happen in production. For example, by getting notified by email or Slack. The challenge is to choose the right set of tools that will satisfy your requirements without breaking the bank.
May I suggest starting with defining the core set of metrics that must be watched to ensure a healthy state – CPU, server RAM, Node process RAM (less than 1.4GB), the number of errors in the last minute, number of process restarts, average response time. Then go over some advanced features you might fancy and add to your wish list. Some examples of luxury monitoring features: DB profiling, cross-service measuring (i.e. measuring a business transaction), frontend integration, exposing raw data to custom BI clients, Slack notifications and many others.
+
+Achieving the advanced features demands a lengthy setup or buying a commercial product such as Datadog, New Relic and the like. Unfortunately, even achieving the basics is not a walk in the park as some metrics are hardware-related (CPU) and others live within the node process (internal errors), thus all the straightforward tools require some additional setup. For example, cloud vendor monitoring solutions (e.g. AWS CloudWatch, Google StackDriver) will tell you immediately about the hardware metrics but nothing about the internal app behavior. On the other hand, log-based solutions such as Elasticsearch lack the hardware view by default. The solution is to augment your choice with the missing metrics; for example, a popular choice is sending application logs to the Elastic stack and configuring some additional agent (e.g. Beat) to share hardware-related information with it to get the full picture.
+
+
+### Blog Quote: "We have a problem with promises"
+ From the blog pouchdb.com, ranked 11 for the keywords "Node Promises"
+
+ > … We recommend you to watch these signals for all of your services: Error Rate: Because errors are user facing and immediately affect your customers.
+Response time: Because the latency directly affects your customers and business.
+Throughput: The traffic helps you to understand the context of increased error rates and the latency too.
+Saturation: It tells how "full" your service is. If the CPU usage is 90%, can your system handle more traffic?
+…
 diff --git a/translations/spanish/sections/errorhandling/operationalvsprogrammererror.md b/translations/spanish/sections/errorhandling/operationalvsprogrammererror.md new file mode 100644 index 000000000..6eee0ee84 --- /dev/null +++ b/translations/spanish/sections/errorhandling/operationalvsprogrammererror.md @@ -0,0 +1,51 @@
+# Distinguish operational vs programmer errors
+
+### One Paragraph Explainer
+
+Distinguishing the following two error types will minimize your app downtime and help avoid crazy bugs: Operational errors refer to situations where you understand what happened and its impact – for example, a query to some HTTP service failed due to a connection problem. On the other hand, programmer errors refer to cases where you have no idea why, and sometimes where, an error came from – it might be some code that tried to read an undefined value or a DB connection pool that leaks memory. Operational errors are relatively easy to handle – usually logging the error is enough.
Things become hairy when a programmer error pops up: the application might be in an inconsistent state and there's nothing better you can do than restart gracefully
+
+
+
+### Code Example – marking an error as operational (trusted)
+
+```javascript
+//marking an error object as operational
+var myError = new Error("How can I add new product when no value provided?");
+myError.isOperational = true;
+
+//or if you're using some centralized error factory (see other examples at the bullet "Use only the built-in Error object")
+function appError(commonType, description, isOperational) {
+    Error.call(this);
+    Error.captureStackTrace(this);
+    this.commonType = commonType;
+    this.description = description;
+    this.isOperational = isOperational;
+};
+
+throw new appError(errorManagement.commonErrors.InvalidInput, "Describe here what happened", true);
+
+```
+
+### Blog Quote: "Programmer errors are bugs in the program"
+From the blog Joyent, ranked 1 for the keywords "Node.JS error handling"
+
+ > …The best way to recover from programmer errors is to crash immediately. You should run your programs using a restarter that will automatically restart the program in the event of a crash. With a restarter in place, crashing is the fastest way to restore reliable service in the face of a transient programmer error…
+
+### Blog Quote: "No safe way to leave without creating some undefined brittle state"
+From Node.JS official documentation
+
+ > …By the very nature of how throw works in JavaScript, there is almost never any way to safely "pick up where you left off", without leaking references, or creating some other sort of undefined brittle state. The safest way to respond to a thrown error is to shut down the process. Of course, in a normal web server, you might have many connections open, and it is not reasonable to abruptly shut those down because an error was triggered by someone else. The better approach is to send an error response to the request that triggered the error, while letting the others finish in their normal time, and stop listening for new requests in that worker.
+
+
+### Blog Quote: "Otherwise you risk the state of your application"
+From the blog debugable.com, ranked 3 for the keywords "Node.JS uncaught exception"
+
+ > …So, unless you really know what you are doing, you should perform a graceful restart of your service after receiving an "uncaughtException" exception event. Otherwise you risk the state of your application, or that of 3rd party libraries to become inconsistent, leading to all kinds of crazy bugs…
+
+### Blog Quote: "There are three schools of thoughts on error handling"
+From the blog: JS Recipes
+
+ > …There are primarily three schools of thoughts on error handling:
+1. Let the application crash and restart it.
+2. Handle all possible errors and never crash.
+3. Balanced approach between the two
 diff --git a/translations/spanish/sections/errorhandling/shuttingtheprocess.md b/translations/spanish/sections/errorhandling/shuttingtheprocess.md new file mode 100644 index 000000000..dc488c6df --- /dev/null +++ b/translations/spanish/sections/errorhandling/shuttingtheprocess.md @@ -0,0 +1,54 @@
+# Shut the process gracefully when a stranger comes to town
+
+
+### One Paragraph Explainer
+
+Somewhere within your code, an error handler object is responsible for deciding how to proceed when an error comes in – if the error is trusted (i.e. an operational error, see further explanation within best practice #3) then writing to a log file might be enough.
Things get hairy if the error is not familiar – this means that some component might be in a faulty state and all future requests are subject to failure. For example, assume a singleton, stateful token issuer service that threw an exception and lost its state – from now on it might behave unexpectedly and cause all requests to fail. Under this scenario, kill the process and use a 'Restarter tool' (like Forever, PM2, etc) to start over with a clean slate.
+
+
+
+### Code example: deciding whether to crash
+
+```javascript
+//deciding whether to crash when an uncaught exception arrives
+//Assuming developers mark known operational errors with error.isOperational=true, read best practice #3
+process.on('uncaughtException', function(error) {
+    errorManagement.handler.handleError(error);
+    if(!errorManagement.handler.isTrustedError(error))
+        process.exit(1)
+});
+
+
+//centralized error handler encapsulates error-handling related logic
+function errorHandler(){
+    this.handleError = function (error) {
+        return logger.logError(error).then(sendMailToAdminIfCritical).then(saveInOpsQueueIfCritical).then(determineIfOperationalError);
+    }
+
+    this.isTrustedError = function(error)
+    {
+        return error.isOperational;
+    }
+}
+
+```
+
+
+### Blog Quote: "The best way is to crash"
+ From the blog Joyent
+
+ > …The best way to recover from programmer errors is to crash immediately. You should run your programs using a restarter that will automatically restart the program in the event of a crash. With a restarter in place, crashing is the fastest way to restore reliable service in the face of a transient programmer error…
+
+
+### Blog Quote: "There are three schools of thoughts on error handling"
+ From the blog: JS Recipes
+
+ > …There are primarily three schools of thoughts on error handling:
+1. Let the application crash and restart it.
+2. Handle all possible errors and never crash.
+3. Balanced approach between the two
+
+
+### Blog Quote: "No safe way to leave without creating some undefined brittle state"
+From Node.JS official documentation
+
+ > …By the very nature of how throw works in JavaScript, there is almost never any way to safely "pick up where you left off", without leaking references, or creating some other sort of undefined brittle state. The safest way to respond to a thrown error is to shut down the process. Of course, in a normal web server, you might have many connections open, and it is not reasonable to abruptly shut those down because an error was triggered by someone else. The better approach is to send an error response to the request that triggered the error, while letting the others finish in their normal time, and stop listening for new requests in that worker.
\ No newline at end of file
 diff --git a/translations/spanish/sections/errorhandling/testingerrorflows.md b/translations/spanish/sections/errorhandling/testingerrorflows.md new file mode 100644 index 000000000..0684cab7f --- /dev/null +++ b/translations/spanish/sections/errorhandling/testingerrorflows.md @@ -0,0 +1,37 @@
+# Test error flows using your favorite test framework
+
+
+### One Paragraph Explainer
+
+Testing 'happy' paths is no better than testing failures. Good test code coverage demands testing exceptional paths. Otherwise, there is no trust that exceptions are indeed handled correctly. Every unit testing framework, like Mocha & Chai, has support for exception testing (code examples below). If you find it tedious to test every inner function and exception, you may settle for testing only REST API HTTP errors.
+
+
+### Code example: ensuring the right exception is thrown using Mocha & Chai
+
+```javascript
+describe("Facebook chat", () => {
+  it("Notifies on new chat message", () => {
+    const chatService = new ChatService(); //assuming the ChatService class is imported above
+    chatService.participants = getDisconnectedParticipants();
+    expect(chatService.sendMessage.bind(chatService, {message: "Hi"})).to.throw(ConnectionError);
+  });
+});
+
+```
+
+### Code example: ensuring API returns the right HTTP error code
+
+```javascript
+it("Creates new Facebook group", function (done) {
+  var invalidGroupInfo = {};
+  httpRequest({method: 'POST', uri: "facebook.com/api/groups", resolveWithFullResponse: true, body: invalidGroupInfo, json: true
+  }).then((response) => {
+    //oh no, if we reached here then no exception was thrown
+  }).catch(function (response) {
+    expect(400).to.equal(response.statusCode);
+    done();
+  });
+});
+
+```
\ No newline at end of file
 diff --git a/translations/spanish/sections/errorhandling/usematurelogger.md b/translations/spanish/sections/errorhandling/usematurelogger.md new file mode 100644 index 000000000..22b206eb0 --- /dev/null +++ b/translations/spanish/sections/errorhandling/usematurelogger.md @@ -0,0 +1,51 @@
+# Use a mature logger to increase errors visibility
+
+
+### One Paragraph Explainer
+
+We all love console.log, but obviously a reputable and persistent logger like Winston, Bunyan or Log4js is mandatory for serious projects. A set of practices and tools will help to reason about errors much more quickly – (1) log frequently using different levels (debug, info, error), (2) when logging, provide contextual information as JSON objects, see the example below, (3) watch and filter logs using a log querying API (built into most loggers) or a log viewer software,
+(4) expose and curate log statements for the operations team using an operational intelligence tool like Splunk
+
+
+
+### Code Example – Winston Logger in action
+
+```javascript
+var winston = require('winston');
+
+//your centralized logger object
+var logger = new winston.Logger({
+    level: 'info',
+    transports: [
+        new (winston.transports.Console)(),
+        new (winston.transports.File)({ filename: 'somefile.log' })
+    ]
+});
+
+//custom code somewhere using the logger
+logger.log('info', 'Test Log Message with some parameter %s', 'some parameter', { anything: 'This is metadata' });
+
+```
+
+### Code Example – Querying the log folder (searching for entries)
+
+```javascript
+var options = {
+    from: new Date - 24 * 60 * 60 * 1000, until: new Date, limit: 10, start: 0,
+    order: 'desc', fields: ['message']
+};
+
+
+// Find items logged between today and yesterday.
+winston.query(options, function (err, results) {
+    //callback with results
+});
+
+```
+
+### Blog Quote: "Logger Requirements"
+ From the blog Strong Loop
+
+ > Let's identify a few requirements (for a logger):
+1. Timestamp each log line. This one is pretty self explanatory – you should be able to tell when each log entry occurred.
+2. Logging format should be easily digestible by humans as well as machines.
+3. Allows for multiple configurable destination streams.
For example, you might be writing trace logs to one file but when an error is encountered, write to the same file, then into an error file and send an email at the same time…
+
\ No newline at end of file
 diff --git a/translations/spanish/sections/errorhandling/useonlythebuiltinerror.md b/translations/spanish/sections/errorhandling/useonlythebuiltinerror.md new file mode 100644 index 000000000..deb372b10 --- /dev/null +++ b/translations/spanish/sections/errorhandling/useonlythebuiltinerror.md @@ -0,0 +1,78 @@
+# Use only the built-in Error object
+
+
+### One Paragraph Explainer
+
+The permissive nature of JS along with its variety of code-flow options (e.g. EventEmitter, Callbacks, Promises, etc) leads to great variance in how developers raise errors – some use strings, others define their own custom types. Using the Node.JS built-in Error object helps to keep uniformity within your code and with 3rd party libraries; it also preserves significant information like the stack trace. When raising the exception, it's usually a good practice to fill it with additional contextual properties like the error name and the associated HTTP error code. To achieve this uniformity and these practices, consider extending the Error object with additional properties, see the code example below
+
+### Blog Quote: "I don't see the value in having lots of different types"
+From the blog Ben Nadel, ranked 5 for the keywords "Node.JS error object"
+
+ > …Personally, I don't see the value in having lots of different types of error objects – JavaScript, as a language, doesn't seem to cater to Constructor-based error-catching. As such, differentiating on an object property seems far easier than differentiating on a Constructor type…
+
+
+
+### Code Example – doing it right
+
+```javascript
+//throwing an Error from a typical function, whether sync or async
+if(!productToAdd)
+    throw new Error("How can I add new product when no value provided?");
+
+//'throwing' an Error from EventEmitter
+const myEmitter = new MyEmitter();
+myEmitter.emit('error', new Error('whoops!'));
+
+//'throwing' an Error from a Promise
+return new Promise(function (resolve, reject) {
+    return DAL.getProduct(productToAdd.id).then((existingProduct) => {
+        if(existingProduct != null)
+            reject(new Error("Why fooling us and trying to add an existing product?"));
+    });
+});
+
+```
+
+### Code example – Anti Pattern
+
+```javascript
+//throwing a String lacks any stack trace information and other important properties
+if(!productToAdd)
+    throw ("How can I add new product when no value provided?");
+
+```
+
+### Code example – doing it even better
+
+```javascript
+//centralized error object that derives from Node’s Error
+function appError(name, httpCode, description, isOperational) {
+    Error.call(this);
+    Error.captureStackTrace(this);
+    this.name = name;
+    //...other properties assigned here
+};
+
+appError.prototype.__proto__ = Error.prototype;
+
+module.exports.appError = appError;
+
+//client throwing an exception
+if(user == null)
+    throw new appError(commonErrors.resourceNotFound, commonHTTPErrors.notFound, "further explanation", true)
+```
+
+
+### Blog Quote: "A string is not an error"
+From the blog devthought.com, ranked 6 for the keywords "Node.JS error object"
+
+ > …passing a string instead of an error results in reduced interoperability between modules. It breaks contracts with APIs that might be performing instanceof Error checks, or that want to know more about the error.
Error objects, as we'll see, have very interesting properties in modern JavaScript engines besides holding the message passed to the constructor…
+
+### Blog Quote: "All JavaScript and System errors raised by Node.js inherit from Error"
+From Node.JS official documentation
+
+ > …All JavaScript and System errors raised by Node.js inherit from, or are instances of, the standard JavaScript Error class and are guaranteed to provide at least the properties available on that class. A generic JavaScript Error object that does not denote any specific circumstance of why the error occurred. Error objects capture a "stack trace" detailing the point in the code at which the Error was instantiated, and may provide a text description of the error. All errors generated by Node.js, including all System and JavaScript errors, will either be instances of, or inherit from, the Error class…
+
+### Blog Quote: "Inheriting from Error doesn't add too much value"
+From the blog machadogj
+
+ > …One problem that I have with the Error class is that it is not so simple to extend. Of course you can inherit the class and create your own Error classes like HttpError, DbError, etc. However that takes time, and doesn't add too much value unless you are doing something with types. Sometimes, you just want to add a message, and keep the inner error, and sometimes you might want to extend the error with parameters, and such…
 diff --git a/translations/spanish/sections/production/apmproducts.md b/translations/spanish/sections/production/apmproducts.md new file mode 100644 index 000000000..9d464f1c7 --- /dev/null +++ b/translations/spanish/sections/production/apmproducts.md @@ -0,0 +1,27 @@
+# Assure user experience with APM products
+

+
+
+### One Paragraph Explainer
+
+APM, application performance monitoring, refers to a family of products that aim to monitor application performance from end to end, also from the customer's perspective. While traditional monitoring solutions focus on Exceptions and standalone technical metrics (e.g. error tracking, slow server endpoints, etc), in the real world our app might create disappointed users without any code exceptions, for example if some middleware service performs really slowly. APM products measure the user experience from end to end, for example, given a system that encompasses a frontend UI and multiple distributed services – some APM products can tell how long a transaction that spans multiple tiers lasts. It can tell whether the user experience is solid and point to the problem. This attractive offering comes with a relatively high price tag, hence it's recommended for large-scale and complex products that require going beyond straightforward monitoring.
+

+
+
+### APM example – a commercial product that visualizes cross-service app performance
+
+![APM example](/assets/images/apm1.png "APM example")
+

+
+### APM example – a commercial product that emphasizes the user experience score
+
+![APM example](/assets/images/apm2.png "APM example")
+

+ +### APM example – a commercial product that highlights slow code paths + +![APM example](/assets/images/apm3.png "APM example") diff --git a/translations/spanish/sections/production/assigntransactionid.md b/translations/spanish/sections/production/assigntransactionid.md new file mode 100644 index 000000000..71402f692 --- /dev/null +++ b/translations/spanish/sections/production/assigntransactionid.md @@ -0,0 +1,41 @@ +# Assign ‘TransactionId’ to each log statement + +

+
+
+### One Paragraph Explainer
+
+A typical log is a warehouse of entries from all components and requests. Upon detection of some suspicious line or error, it becomes hairy to match other lines that belong to the same specific flow (e.g. the user "John" tried to buy something). This becomes even more critical and challenging in a microservices environment, where a request/transaction might span across multiple computers. Address this by assigning a unique transaction Id value to all the entries from the same request, so that when detecting one line, one can copy the Id and search for every line with the same transaction Id. However, achieving this in Node is not straightforward as a single thread is used to serve all requests – consider using a library that can group data on the request level – see the code example below. When calling another microservice, pass the transaction Id using an HTTP header "x-transaction-id" to keep the same context.
+

+
+
+### Code example: grouping request-level data with continuation-local-storage
+
+```javascript
+//when receiving a new request, start a new isolated context and set a transaction Id. The following example is using the NPM library continuation-local-storage to isolate requests
+var createNamespace = require('continuation-local-storage').createNamespace;
+var session = createNamespace('my session');
+
+router.get('/:id', (req, res, next) => {
+    //run the rest of this request inside the namespace so the transaction Id is available to every component
+    session.run(() => {
+        session.set('transactionId', 'some unique GUID');
+        someService.getById(req.params.id);
+        logger.info('Starting now to get something by Id');
+    });
+});
+
+//Now any other service or component can access the contextual, per-request, data
+class someService {
+    static getById(id) {
+        logger.info('Starting now to get something by Id');
+        //other logic comes here
+    }
+}
+
+//The logger can now append the transaction Id to each entry, so that entries from the same request will have the same value
+class logger {
+    static info(message) {
+        console.log(`${message} ${session.get('transactionId')}`);
+    }
+}
+```
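+
+To complete the picture from the explainer above, a hedged sketch of forwarding the transaction Id to another microservice via the "x-transaction-id" header (axios and the target URL are illustrative choices, not requirements):
+
+```javascript
+const axios = require('axios');
+const getNamespace = require('continuation-local-storage').getNamespace;
+
+function callOrdersService(orderId) {
+    //read the transaction Id that was set for the current request and forward it downstream
+    const session = getNamespace('my session');
+    return axios.get(`http://orders-service/orders/${orderId}`, {
+        headers: { 'x-transaction-id': session.get('transactionId') }
+    });
+}
+```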

+ +### What Other Bloggers Say +From the blog [ARG! TEAM](http://blog.argteam.com/coding/hardening-node-js-for-production-part-2-using-nginx-to-avoid-node-js-load): +> ...Although express.js has built in static file handling through some connect middleware, you should never use it. *Nginx can do a much better job of handling static files and can prevent requests for non-dynamic content from clogging our node processes*... diff --git a/translations/spanish/sections/production/bestateless.md b/translations/spanish/sections/production/bestateless.md new file mode 100644 index 000000000..29d2bec0e --- /dev/null +++ b/translations/spanish/sections/production/bestateless.md @@ -0,0 +1,39 @@ +# Be stateless, kill your Servers almost every day + +

+
+
+### One Paragraph Explainer
+
+Have you ever encountered a severe production issue where one server was missing some piece of configuration or data? That is probably due to some unnecessary dependency on a local asset that is not part of the deployment. Many successful products treat servers like a phoenix bird – a server dies and is reborn periodically without any damage. In other words, a server is just a piece of hardware that executes your code for some time and then gets replaced.
+This approach:
+1. allows scaling by adding and removing servers dynamically without any side-effect
+2. simplifies the maintenance as it frees our mind from evaluating each server's state.
+

+
+
+### Code example: anti-patterns
+
+```javascript
+//Typical mistake 1: saving uploaded files locally on a server
+var multer = require('multer') //express middleware for fetching uploads
+var upload = multer({ dest: 'uploads/' })
+app.post('/photos/upload', upload.array('photos', 12), function (req, res, next) {})
+
+//Typical mistake 2: storing authentication sessions (passport) in a local file or memory
+var FileStore = require('session-file-store')(session);
+app.use(session({
+    store: new FileStore(options),
+    secret: 'keyboard cat'
+}));
+
+//Typical mistake 3: storing information on the global object
+global.someCacheLike.result = {somedata}
+```
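+
+In contrast to the anti-patterns above, a hedged sketch of keeping session state in an external store (Redis is just one common choice here; the Express app object, host name and options are illustrative):
+
+```javascript
+//keeping session state outside the process, so any server instance can be killed and replaced safely
+const session = require('express-session');
+const RedisStore = require('connect-redis')(session);
+
+app.use(session({
+    store: new RedisStore({ host: 'sessions.example.internal', port: 6379 }),
+    secret: 'keyboard cat',
+    resave: false,
+    saveUninitialized: false
+}));
+```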

+ +### What Other Bloggers Say +From the blog [Martin Fowler](https://martinfowler.com/bliki/PhoenixServer.html): +> ...One day I had this fantasy of starting a certification service for operations. The certification assessment would consist of a colleague and I turning up at the corporate data center and setting about critical production servers with a baseball bat, a chainsaw, and a water pistol. The assessment would be based on how long it would take for the operations team to get all the applications up and running again. This may be a daft fantasy, but there’s a nugget of wisdom here. While you should forego the baseball bats, it is a good idea to virtually burn down your servers at regular intervals. A server should be like a phoenix, regularly rising from the ashes... + +

diff --git a/translations/spanish/sections/production/createmaintenanceendpoint.md b/translations/spanish/sections/production/createmaintenanceendpoint.md new file mode 100644 index 000000000..08ded442f --- /dev/null +++ b/translations/spanish/sections/production/createmaintenanceendpoint.md @@ -0,0 +1,35 @@ +# Create a maintenance endpoint + +

+
+
+### One Paragraph Explainer
+
+A maintenance endpoint is a plain, secured HTTP API that is part of the app code and whose purpose is for the ops/production team to view and invoke useful functionality. For example, it can return a heap dump (memory snapshot) of the process, report whether there are some memory leaks and even allow executing REPL commands directly. This endpoint is needed where the conventional devops tools (monitoring products, logs, etc) fail to gather some specific type of information, or when you choose not to buy/install such tools. The golden rule is to use professional and external tools for monitoring and maintaining production; these are usually more robust and accurate. That said, there are likely to be cases where the generic tools will fail to extract information that is specific to Node or to your app – for example, should you wish to generate a memory snapshot at the moment the GC completed a cycle – a few NPM libraries will be glad to perform this for you, but popular monitoring tools will likely miss this functionality
+

+
+
+### Code example: generating a heap dump via code
+
+```javascript
+var heapdump = require('heapdump');
+var fs = require('fs');
+
+router.get('/ops/heapdump', (req, res, next) => {
+    logger.info(`About to generate a heapdump`);
+    heapdump.writeSnapshot(function (err, filename) {
+        console.log('heapdump file is ready to be sent to the caller', filename);
+        fs.readFile(filename, "utf-8", function (err, data) {
+            res.end(data);
+        });
+    });
+});
+```

+ +### Recommended Watch + +▶ [Getting your Node.js app production ready](http://mubaloo.com/best-practices-deploying-node-js-applications) + +![Getting your Node.js app production ready](/assets/images/createmaintenanceendpoint1.png "Getting your Node.js app production ready") diff --git a/translations/spanish/sections/production/delegatetoproxy.md b/translations/spanish/sections/production/delegatetoproxy.md new file mode 100644 index 000000000..587eb9221 --- /dev/null +++ b/translations/spanish/sections/production/delegatetoproxy.md @@ -0,0 +1,50 @@ +# Delegate anything possible (e.g. static content, gzip) to a reverse proxy + +

+
+
+### One Paragraph Explainer
+
+It's very tempting to cargo-cult Express and use its rich middleware offering for networking-related tasks like serving static files, gzip encoding, throttling requests, SSL termination, etc. This is a performance kill due to its single-threaded model which will keep the CPU busy for long periods (remember, Node's execution model is optimized for short tasks or async IO-related tasks). A better approach is to use a tool that specializes in networking tasks – the most popular are nginx and HAproxy, which are also used by the biggest cloud vendors to lighten the incoming load on node.js processes.
+

+
+
+### Code Example – a typical nginx configuration
+
+```nginx
+gzip on;
+#defining gzip compression
+gzip_comp_level 6;
+gzip_vary on;
+upstream myApplication {
+    server 127.0.0.1:3000;
+    server 127.0.0.1:3001;
+    keepalive 64;
+}
+
+#defining web server
+server {
+    listen 80;
+    listen 443 ssl;
+    ssl_certificate /some/location/sillyfacesociety.com.bundle.crt;
+    error_page 502 /errors/502.html;
+
+    #handling static content
+    location ~ ^/(images/|img/|javascript/|js/|css/|stylesheets/|flash/|media/|static/|robots.txt|humans.txt|favicon.ico) {
+        root /usr/local/silly_face_society/node/public;
+        access_log off;
+        expires max;
+    }
+}
+```

+ +### What Other Bloggers Say + +* From the blog [Mubaloo](http://mubaloo.com/best-practices-deploying-node-js-applications): +> …It’s very easy to fall into this trap – You see a package like Express and think “Awesome! Let’s get started” – you code away and you’ve got an application that does what you want. This is excellent and, to be honest, you’ve won a lot of the battle. However, you will lose the war if you upload your app to a server and have it listen on your HTTP port, because you’ve forgotten a very crucial thing: Node is not a web server. **As soon as any volume of traffic starts to hit your application, you’ll notice that things start to go wrong: connections are dropped, assets stop being served or, at the very worst, your server crashes. What you’re doing is attempting to have Node deal with all of the complicated things that a proven web server does really well. Why reinvent the wheel?** +> **This is just for one request, for one image and bearing in mind this is memory that your application could be using for important stuff like reading a database or handling complicated logic; why would you cripple your application for the sake of convenience?** + + +* From the blog [Argteam](http://blog.argteam.com/coding/hardening-node-js-for-production-part-2-using-nginx-to-avoid-node-js-load): +> Although express.js has built in static file handling through some connect middleware, you should never use it. **Nginx can do a much better job of handling static files and can prevent requests for non-dynamic content from clogging our node processes**… diff --git a/translations/spanish/sections/production/detectvulnerabilities.md b/translations/spanish/sections/production/detectvulnerabilities.md new file mode 100644 index 000000000..9cc850d84 --- /dev/null +++ b/translations/spanish/sections/production/detectvulnerabilities.md @@ -0,0 +1,26 @@ +# Use tools that automatically detect vulnerabilities + +

+
+
+### One Paragraph Explainer
+
+I really love the following words from a StrongLoop blog: "The security of your app is only as strong as the weakest link in your dependencies". Code dependencies in fact tend to have vulnerabilities often, even the most famous and battle-tested packages. For example, a threat was detected in a previous version of Express that might expose the user to a cross-site scripting attack. Luckily, community and commercial tools (all have free plans, at least for public repositories) such as nsp and snyk can keep an automatic eye on these threats and warn the team, and the latter can even patch these vulnerabilities automatically
+

+
+### What Other Bloggers Say
+From the StrongLoop blog, "Best Practices for Express in Production":
+
+> ...Using npm to manage your application's dependencies is powerful and convenient. But the packages that you use may contain critical security vulnerabilities that could also affect your application. The security of your app is only as strong as the "weakest link" in your dependencies. Fortunately, there are two helpful tools you can use to ensure the security of the third-party packages you use: nsp and requireSafe. These two tools do largely the same thing, so using both might be overkill, but "better safe than sorry" are words to live by when it comes to security...
+

+### Code example: hardening Express with the helmet middleware
+
+```javascript
+const helmet = require('helmet');
+
+//using a single line of code will attach 7 protecting middleware to the Express app
+app.use(helmet());
+//additional configurations can be applied on demand, this one misleads the caller into thinking we're using PHP 🙂
+app.use(helmet.hidePoweredBy({ setTo: 'PHP 4.2.0' }));
+//other middleware are not activated by default and require explicit configuration
+app.use(helmet.referrerPolicy({ policy: 'same-origin' }));
+```
\ No newline at end of file
 diff --git a/translations/spanish/sections/production/frontendout.md b/translations/spanish/sections/production/frontendout.md new file mode 100644 index 000000000..20af87e8e --- /dev/null +++ b/translations/spanish/sections/production/frontendout.md @@ -0,0 +1,41 @@
+# Get your frontend assets out of Node
+

+ + +### One Paragraph Explainer + +In a classic web app the backend serves the frontend/graphics to the browser, and a very common approach in the Node world is to use the Express static middleware for streaming static files to the client. BUT – Node is not a typical webapp as it utilizes a single thread that is not optimized to serve many files at once. Instead, consider using a reverse proxy, cloud storage or a CDN (e.g. Nginx, AWS S3, Azure Blob Storage, etc) that utilizes many optimizations for this task and gain much better throughput. For example, specialized middleware like nginx embodies a direct hook between the file system and the network card and a multi-threaded approach to minimize intervention among multiple requests. + +Your optimal solution might wear one of the following forms (a sketch for the second option follows this list): +1. A reverse proxy – your static files will be located right next to your Node application, and only requests to the static files folder will be served by a proxy that sits in front of your Node app, such as nginx. Using this approach, your Node app is responsible for deploying the static files but not for serving them. Your frontend colleagues will love this approach as it prevents cross-origin requests from the frontend. +2. Cloud storage – your static files will NOT be part of your Node app content; instead they will be uploaded to services like AWS S3, Azure Blob Storage, or other similar services that were born for this mission. Using this approach, your Node app is not responsible for deploying the static files nor for serving them, hence a complete decoupling is drawn between Node and the frontend, which is anyway handled by different teams. + +
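+### Code example: pushing static assets to cloud storage at deploy time (illustrative sketch)
+
+A minimal sketch of the second option above – a deploy script uploads the public folder to AWS S3 so the running Node process never touches those files. The bucket name, folder layout and flat file listing are assumptions made for illustration only:
+
+```javascript
+//deploy-static.js – run once per deployment, outside the web process
+const AWS = require('aws-sdk');
+const fs = require('fs');
+const path = require('path');
+
+const s3 = new AWS.S3();
+const publicDir = path.join(__dirname, 'public');
+
+fs.readdirSync(publicDir).forEach((fileName) => {
+  s3.upload({
+    Bucket: 'my-app-static-assets',   //placeholder bucket name
+    Key: fileName,
+    Body: fs.createReadStream(path.join(publicDir, fileName))
+  }, (error) => {
+    if (error) console.error('Failed to upload', fileName, error);
+  });
+});
+```
+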

+ + +### Code example: typical nginx configuration for serving static files + +```nginx +#defining gzip compression +gzip on; +keepalive 64; + +#defining web server +server { +listen 80; +listen 443 ssl; + +#handling static content +location ~ ^/(images/|img/|javascript/|js/|css/|stylesheets/|flash/|media/|static/|robots.txt|humans.txt|favicon.ico) { +root /usr/local/silly_face_society/node/public; +access_log off; +expires max; +} +} +``` + +

+ +### What Other Bloggers Say +From the blog [StrongLoop](https://strongloop.com/strongblog/best-practices-for-express-in-production-part-two-performance-and-reliability/): + +>…In development, you can use [res.sendFile()](http://expressjs.com/4x/api.html#res.sendFile) to serve static files. But don’t do this in production, because this function has to read from the file system for every file request, so it will encounter significant latency and affect the overall performance of the app. Note that res.sendFile() is not implemented with the sendfile system call, which would make it far more efficient. Instead, use serve-static middleware (or something equivalent), that is optimized for serving files for Express apps. An even better option is to use a reverse proxy to serve static files; see Use a reverse proxy for more information… + +

diff --git a/translations/spanish/sections/production/guardprocess.md b/translations/spanish/sections/production/guardprocess.md new file mode 100644 index 000000000..523d67f6a --- /dev/null +++ b/translations/spanish/sections/production/guardprocess.md @@ -0,0 +1,19 @@ +# Guard and restart your process upon failure (using the right tool) + +

+ + +### One Paragraph Explainer + +At the base level, Node processes must be guarded and restarted upon failure. Simply put, for small apps and those who don’t use containers – tools like [PM2](https://www.npmjs.com/package/pm2-docker) are perfect as they bring simplicity, restarting capabilities and also rich integration with Node. Others with strong Linux skills might use systemd and run Node as a service. Things get more interesting for apps that use Docker or any container technology, since those are usually accompanied by cluster management tools (e.g. [AWS ECS](http://docs.aws.amazon.com/AmazonECS/latest/developerguide/Welcome.html), [Kubernetes](https://kubernetes.io/), etc) that deploy, monitor and heal containers. Having all those rich cluster management features including container restart, why mess up with other tools like PM2? There’s no bulletproof answer. There are good reasons to keep PM2 within containers (mostly its container-specific version [pm2-docker](https://www.npmjs.com/package/pm2-docker)) as the first guarding tier – it’s much faster to restart a process and it provides Node-specific features like flagging to the code when the hosting container asks to gracefully restart. Others might choose to avoid unnecessary layers. To conclude this write-up, no solution suits them all and getting to know the options is the important thing + +
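+### Code example: guarding a process with PM2 (illustrative sketch)
+
+A minimal sketch of how the first guarding tier could look with PM2. The app name, entry script and restart limits are assumptions for illustration, not recommendations from the original text:
+
+```javascript
+//ecosystem.config.js – PM2 brings the process back up whenever it crashes
+module.exports = {
+  apps: [{
+    name: 'orders-api',        //illustrative name
+    script: './server.js',     //illustrative entry point
+    instances: 1,
+    autorestart: true,         //restart automatically after a failure
+    max_restarts: 10,          //stop retrying if the app keeps crash-looping
+    env_production: {
+      NODE_ENV: 'production'
+    }
+  }]
+};
+```
+
+Starting it then becomes a one-liner such as `pm2 start ecosystem.config.js --env production`.
+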

+ + +### What Other Bloggers Say + +* From the [Express Production Best Practices](https://expressjs.com/en/advanced/best-practice-performance.html): +> ... In development, you started your app simply from the command line with node server.js or something similar. **But doing this in production is a recipe for disaster. If the app crashes, it will be offline** until you restart it. To ensure your app restarts if it crashes, use a process manager. A process manager is a “container” for applications that facilitates deployment, provides high availability, and enables you to manage the application at runtime. + +* From the Medium blog post [Understanding Node Clustering](https://medium.com/@CodeAndBiscuits/understanding-nodejs-clustering-in-docker-land-64ce2306afef#.cssigr5z3): +> ... Understanding NodeJS Clustering in Docker-Land “Docker containers are streamlined, lightweight virtual environments, designed to simplify processes to their bare minimum. Processes that manage and coordinate their own resources are no longer as valuable. **Instead, management stacks like Kubernetes, Mesos, and Cattle have popularized the concept that these resources should be managed infrastructure-wide**. CPU and memory resources are allocated by “schedulers”, and network resources are managed by stack-provided load balancers. \ No newline at end of file diff --git a/translations/spanish/sections/production/lockdependencies.md b/translations/spanish/sections/production/lockdependencies.md new file mode 100644 index 000000000..35fb62b14 --- /dev/null +++ b/translations/spanish/sections/production/lockdependencies.md @@ -0,0 +1,74 @@ +# Lock dependencies + +

+ + +### One Paragraph Explainer + + + +Your code depends on many external packages, let’s say it ‘requires’ and uses momentjs 2.1.4. Then, by default, when you deploy to production NPM might fetch momentjs 2.1.5, which unfortunately brings some new bugs to the table. Using NPM config files and the setting --save-exact=true instructs NPM to refer to the *exact* same version that was installed, so the next time you run “npm install” (in production or within a Docker container you plan to ship forward for testing) the same dependent version will be fetched. An alternative popular approach is using an npm-shrinkwrap.json file (easily generated using NPM) that states exactly which packages and versions should be installed so no environment can get tempted to fetch newer versions. + +* **Update:** as of NPM 5, dependencies are locked automatically in a package-lock.json file. Yarn, an emerging package manager, also locks down dependencies by default + + +

+ + +### Code example: .npmrc file that instructs NPM to use exact versions + +```javascript +//save this as an .npmrc file in the project directory +save-exact=true +``` + +
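+### Code example: the same behaviour from the command line (illustrative sketch)
+
+These are the standard npm CLI equivalents of the setting above; the package name is only an example:
+
+```javascript
+//install a single dependency pinned to its exact version
+npm install moment --save-exact
+
+//or make exact versions the default for every future install
+npm config set save-exact true
+
+//snapshot the whole resolved dependency tree into npm-shrinkwrap.json
+npm shrinkwrap
+```
+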

+ +### Code example: npm-shrinkwrap.json file that distills the exact dependency tree + +```javascript +{ + "name": "A", + "dependencies": { + "B": { + "version": "0.0.1", + "dependencies": { + "C": { + "version": "0.1.0" + } + } + } + } +} +``` + +

+ +### Code example: NPM 5 dependencies lock file – package-lock.json + +```javascript +{ + "name": "package-name", + "version": "1.0.0", + "lockfileVersion": 1, + "dependencies": { + "cacache": { + "version": "9.2.6", + "resolved": "https://registry.npmjs.org/cacache/-/cacache-9.2.6.tgz", + "integrity": "sha512-YK0Z5Np5t755edPL6gfdCeGxtU0rcW/DBhYhYVDckT+7AFkCCtedf2zru5NRbBLFk6e7Agi/RaqTOAfiaipUfg==" + }, + "duplexify": { + "version": "3.5.0", + "resolved": "https://registry.npmjs.org/duplexify/-/duplexify-3.5.0.tgz", + "integrity": "sha1-GqdzAC4VeEV+nZ1KULDMquvL1gQ=", + "dependencies": { + "end-of-stream": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.0.0.tgz", + "integrity": "sha1-1FlucCc0qT5A6a+GQxnqvZn/Lw4=" + } + } + } + } +} +``` diff --git a/translations/spanish/sections/production/measurememory.md new file mode 100644 index 000000000..53cd84eba --- /dev/null +++ b/translations/spanish/sections/production/measurememory.md @@ -0,0 +1,26 @@ +# Measure and guard the memory usage + +

+ + +### One Paragraph Explainer + +In a perfect world, a web developer shouldn’t deal with memory leaks. In reality, memory issues are a known Node gotcha one must be aware of. Above all, memory usage must be monitored constantly. In development and small production sites you may gauge manually using Linux commands or NPM tools and libraries like node-inspector and memwatch. The main drawback of these manual activities is that they require a human being to actively monitor – for serious production sites it’s absolutely vital to use robust monitoring tools (e.g. AWS CloudWatch, DataDog or any similar proactive system) that alert when a leak happens. There are also a few development guidelines to prevent leaks: avoid storing data on the global level, use streams for data with dynamic size, and limit variable scope using let and const. + +
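+### Code example: watching for leaks during development (illustrative sketch)
+
+A minimal sketch of the manual gauging mentioned above, using the memwatch library (the memwatch-next fork is assumed here); the log destinations are illustrative:
+
+```javascript
+const memwatch = require('memwatch-next');
+
+//fires when the heap keeps growing over several consecutive garbage collections
+memwatch.on('leak', (info) => {
+  console.error('Possible memory leak detected', info);
+});
+
+//fires after every full garbage collection with the current heap statistics
+memwatch.on('stats', (stats) => {
+  console.log('Heap size after GC:', stats.current_base);
+});
+```
+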

+ +### What Other Bloggers Say + +* From the blog [Dynatrace](http://apmblog.dynatrace.com/): +> ... “As we already learned, in Node.js JavaScript is compiled to native code by V8. The resulting native data structures don’t have much to do with their original representation and are solely managed by V8. This means that we cannot actively allocate or deallocate memory in JavaScript. V8 uses a well-known mechanism called garbage collection to address this problem.” + +* From the blog [Dynatrace](http://blog.argteam.com/coding/hardening-node-js-for-production-part-2-using-nginx-to-avoid-node-js-load): +> ... “Although this example leads to obvious results the process is always the same: +Create heap dumps with some time and a fair amount of memory allocation in between +Compare a few dumps to find out what’s growing” + +* From the blog [Dynatrace](http://blog.argteam.com/coding/hardening-node-js-for-production-part-2-using-nginx-to-avoid-node-js-load): +> ... “By default, Node.js will try to use about 1.5GBs of memory, which has to be capped when running on systems with less memory. This is the expected behaviour as garbage collection is a very costly operation. +The solution for it was adding an extra parameter to the Node.js process: +node --max_old_space_size=400 server.js --production ” +“Why is garbage collection expensive? The V8 JavaScript engine employs a stop-the-world garbage collector mechanism. In practice, it means that the program stops execution while garbage collection is in progress.” \ No newline at end of file diff --git a/translations/spanish/sections/production/monitoring.md new file mode 100644 index 000000000..8ba2b3c7a --- /dev/null +++ b/translations/spanish/sections/production/monitoring.md @@ -0,0 +1,39 @@ +# Monitoring! + +

+ +### One Paragraph Explainer + +At the very basic level, monitoring means you can *easily* identify when bad things happen in production, for example by getting notified by email or Slack. The challenge is to choose the right set of tools that will satisfy your requirements without breaking the bank. May I suggest starting with defining the core set of metrics that must be watched to ensure a healthy state – CPU, server RAM, Node process RAM (less than 1.4GB), the number of errors in the last minute, number of process restarts, average response time. Then go over some advanced features you might fancy and add to your wish list. Some examples of luxury monitoring features: DB profiling, cross-service measuring (i.e. measuring a business transaction), frontend integration, exposing raw data to custom BI clients, Slack notifications and many others. + +Achieving the advanced features demands a lengthy setup or buying a commercial product such as Datadog, New Relic and the like. Unfortunately, achieving even the basics is not a walk in the park as some metrics are hardware-related (CPU) and others live within the node process (internal errors), thus all the straightforward tools require some additional setup. For example, cloud vendor monitoring solutions (e.g. [AWS CloudWatch](https://aws.amazon.com/cloudwatch/), [Google StackDriver](https://cloud.google.com/stackdriver/)) will tell you immediately about the hardware metrics but nothing about the internal app behavior. On the other hand, log-based solutions such as ElasticSearch lack the hardware view by default. The solution is to augment your choice with the missing metrics; for example, a popular choice is sending application logs to the [Elastic stack](https://www.elastic.co/products) and configuring an additional agent (e.g. [Beat](https://www.elastic.co/products)) to share hardware-related information with it to get the full picture. + +
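+### Code example: exposing basic in-process metrics (illustrative sketch)
+
+The metrics that live inside the Node process can be made visible to whichever monitoring tool you pick by exposing them yourself; a minimal sketch with Express follows, where the route name and the error counter are assumptions for illustration:
+
+```javascript
+const express = require('express');
+const app = express();
+
+let errorsInLastMinute = 0;   //incremented by the app's central error handler (not shown)
+
+//an endpoint a monitoring agent can poll
+app.get('/internal/metrics', (req, res) => {
+  res.json({
+    uptimeSeconds: process.uptime(),
+    memoryRss: process.memoryUsage().rss,        //resident set size in bytes
+    heapUsed: process.memoryUsage().heapUsed,
+    errorsInLastMinute
+  });
+});
+
+app.listen(3000);
+```
+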

+ + +### Monitoring example: AWS cloudwatch default dashboard. Hard to extract in-app metrics + +![AWS cloudwatch default dashboard. Hard to extract in-app metrics](/assets/images/monitoring1.png) + +

+ +### Monitoring example: StackDriver default dashboard. Hard to extract in-app metrics + +![StackDriver default dashboard. Hard to extract in-app metrics](/assets/images/monitoring2.jpg) + +

+ +### Monitoring example: Grafana as the UI layer that visualizes raw data + +![Grafana as the UI layer that visualizes raw data](/assets/images/monitoring3.png) + +

+### What Other Bloggers Say +From the blog [Rising Stack](http://mubaloo.com/best-practices-deploying-node-js-applications/): + +> …We recommend you to watch these signals for all of your services: +> Error Rate: Because errors are user facing and immediately affect your customers. +> Response time: Because the latency directly affects your customers and business. +> Throughput: The traffic helps you to understand the context of increased error rates and the latency too. +> Saturation: It tells how “full” your service is. If the CPU usage is 90%, can your system handle more traffic? … diff --git a/translations/spanish/sections/production/productoncode.md b/translations/spanish/sections/production/productoncode.md new file mode 100644 index 000000000..15ca7b26a --- /dev/null +++ b/translations/spanish/sections/production/productoncode.md @@ -0,0 +1,17 @@ +# Make your code production-ready + +

+ + +### One Paragraph Explainer + +Following is a list of development tips that greatly affect production maintenance and stability: + +* The twelve-factor guide – Get familiar with the [Twelve factors](https://12factor.net/) guide +* Be stateless – Save no data locally on a specific web server (see separate bullet – ‘Be Stateless’) +* Cache – Utilize cache heavily, yet never fail because of cache mismatch +* Test memory – gauge memory usage and leaks as part of your development flow, tools such as ‘memwatch’ can greatly facilitate this task +* Name functions – Minimize the usage of anonymous functions (i.e. inline callbacks) as a typical memory profiler will provide memory usage per method name +* Use CI tools – Use CI tools to detect failures before sending to production. For example, use ESLint to detect reference errors and undefined variables. Use --trace-sync-io to identify code that uses synchronous APIs (instead of the async version) +* Log wisely – Include contextual information in each log statement, hopefully in JSON format so log aggregator tools such as Elastic can search upon those properties (see separate bullet – ‘Increase visibility using smart logs’). Also, include a transaction-id that identifies each request and allows you to correlate lines that describe the same transaction (see separate bullet – ‘Include Transaction-ID’) +* Error management – Error handling is the Achilles’ heel of Node.JS production sites – many Node processes are crashing because of minor errors while others hang alive in a faulty state instead of crashing. Setting your error handling strategy is absolutely critical, read here my [error handling best practices](http://goldbergyoni.com/checklist-best-practices-of-node-js-error-handling/) diff --git a/translations/spanish/sections/production/setnodeenv.md new file mode 100644 index 000000000..8cd4ad67a --- /dev/null +++ b/translations/spanish/sections/production/setnodeenv.md @@ -0,0 +1,35 @@ +# Set NODE_ENV = production + +

+ + +### One Paragraph Explainer + +Process environment variables are a set of key-value pairs made available to any running program, usually for configuration purposes. Though any variables can be used, Node encourages the convention of using a variable called NODE_ENV to flag whether we’re in production right now. This determination allows components to provide better diagnostics during development, for example by disabling caching or emitting verbose log statements. Any modern deployment tool – Chef, Puppet, CloudFormation, and others – supports setting environment variables during deployment + +

+ + +### Code example: Setting and reading the NODE_ENV environment variable + +```javascript +//Using the command line: set the environment variables, then initialize the node process +set NODE_ENV=development&& set otherVariable=someValue&& node + +//Reading the environment variable in code +if (process.env.NODE_ENV === 'production') + useCaching = true; +``` + +

+ + +### What Other Bloggers Say +From the blog [dynatrace](https://www.dynatrace.com/blog/the-drastic-effects-of-omitting-node_env-in-your-express-js-applications/): +> ...In Node.js there is a convention to use a variable called NODE_ENV to set the current mode. We see that it in fact reads NODE_ENV and defaults to ‘development’ if it isn’t set. We clearly see that by setting NODE_ENV to production the number of requests Node.js can handle jumps by around two-thirds while the CPU usage even drops slightly. *Let me emphasize this: Setting NODE_ENV to production makes your application 3 times faster!* + + +![Set NODE_ENV = production](/assets/images/setnodeenv1.png "Set NODE_ENV = production") + + +

diff --git a/translations/spanish/sections/production/smartlogging.md b/translations/spanish/sections/production/smartlogging.md new file mode 100644 index 000000000..d2a9bb7ad --- /dev/null +++ b/translations/spanish/sections/production/smartlogging.md @@ -0,0 +1,43 @@ +# Make your app transparent using smart logs + +

+ + +### One Paragraph Explainer + +Since you print out log statements anyway and obviously need some interface that wraps up production information where you can trace errors and core metrics (e.g. how many errors happen every hour and which is your slowest API endpoint), why not invest some moderate effort in a robust logging framework that will tick all the boxes? Achieving that requires a thoughtful decision on three steps: + +**1. smart logging** – at the bare minimum you need to use a reputable logging library like [Winston](https://github.com/winstonjs/winston) or [Bunyan](https://github.com/trentm/node-bunyan) and write meaningful information at each transaction start and end. Consider also formatting log statements as JSON and providing all the contextual properties (e.g. user id, operation type, etc) so that the operations team can act on those fields. Also include a unique transaction ID in each log line; for more information refer to the bullet below “Write transaction-id to log”. One last point to consider is also including an agent that logs system resources like memory and CPU, such as Elastic Beat. + +**2. smart aggregation** – once you have comprehensive information within your servers' file system, it’s time to periodically push it to a system that aggregates, facilitates and visualizes this data. The Elastic stack, for example, is a popular and free choice that offers all the components to aggregate and visualize data. Many commercial products provide similar functionality, only they greatly cut down the setup time and require no hosting. + +**3. smart visualization** – now that the information is aggregated and searchable, one could be satisfied with the mere power of easily searching the logs, but this can go much further without coding or spending much effort. We can now show important operational metrics like error rate, average CPU throughout the day, how many new users opted in in the last hour and any other metric that helps to govern and improve our app + +
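+### Code example: structured, contextual logging with Winston (illustrative sketch)
+
+A minimal sketch of the first step above, assuming Winston 2.x; the transport choice and the field names are illustrative, not part of the original text:
+
+```javascript
+const winston = require('winston');
+
+//JSON output with timestamps so the aggregation layer can index every field
+const logger = new winston.Logger({
+  transports: [
+    new winston.transports.Console({ json: true, timestamp: true })
+  ]
+});
+
+//every transaction start/end becomes a structured, searchable entry
+logger.info('order-service: transaction started', {
+  transactionId: 'ab12-cd34',   //see the "Write transaction-id to log" bullet
+  userId: 'user-7',
+  operation: 'createOrder'
+});
+```
+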

+ + +### Visualization Example: Kibana (part of Elastic stack) facilitates advanced searching on log content + +![Kibana facilitates advanced searching on log content](/assets/images/smartlogging1.png "Kibana facilitates advanced searching on log content") + +

+ +### Visualization Example: Kibana (part of Elastic stack) visualizes data based on logs + +![Kibana visualizes data based on logs](/assets/images/smartlogging2.jpg "Kibana visualizes data based on logs") + +

+ +### Blog Quote: Logger Requirements +From the blog [Strong Loop](https://strongloop.com/strongblog/compare-node-js-logging-winston-bunyan/): + +> Lets identify a few requirements (for a logger): +> 1. Time stamp each log line. This one is pretty self explanatory – you should be able to tell when each log entry occured. +> 2. Logging format should be easily digestible by humans as well as machines. +> 3. Allows for multiple configurable destination streams. For example, you might be writing trace logs to one file but when an error is encountered, write to the same file, then into error file and send an email at the same time… + +

+ + + +

diff --git a/translations/spanish/sections/production/utilizecpu.md b/translations/spanish/sections/production/utilizecpu.md new file mode 100644 index 000000000..5fa6a9fec --- /dev/null +++ b/translations/spanish/sections/production/utilizecpu.md @@ -0,0 +1,27 @@ +# Utilize all CPU cores + +

+ + +### One Paragraph Explainer + +It might not come as a surprise that in its basic form, Node runs over a single thread = single process = single CPU. Paying for beefy hardware with 4 or 8 CPUs and utilizing only one sounds crazy, right? The quickest solution which fits medium-sized apps is using Node’s Cluster module, which in 10 lines of code spawns a process for each logical core and routes requests between the processes in a round-robin style. Even better, use PM2 which sugarcoats the clustering module with a simple interface and a cool monitoring UI. While this solution works well for traditional applications, it might fall short for applications that require top-notch performance and a robust devops flow. For those advanced use cases, consider replicating the Node process using a custom deployment script and balancing using a specialized tool such as nginx, or use a container engine such as AWS ECS or Kubernetes that has advanced features for placement and replication of processes. + +
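+### Code example: spawning one worker per core with the cluster module (illustrative sketch)
+
+A minimal sketch of the quickest solution described above; the entry point './server' is a placeholder for the existing app:
+
+```javascript
+const cluster = require('cluster');
+const os = require('os');
+
+if (cluster.isMaster) {
+  //one worker per logical core
+  os.cpus().forEach(() => cluster.fork());
+
+  //replace workers that die so the capacity stays constant
+  cluster.on('exit', (worker) => {
+    console.log(`worker ${worker.process.pid} died, forking a new one`);
+    cluster.fork();
+  });
+} else {
+  //each worker runs the app; they all share the same listening port
+  require('./server');
+}
+```
+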

+ + +### Comparison: Balancing using Node’s cluster vs nginx + +![Balancing using Node’s cluster vs nginx](/assets/images/utilizecpucores1.png "Balancing using Node’s cluster vs nginx") + +

+### What Other Bloggers Say +* From the [Node.JS documentation](https://nodejs.org/api/cluster.html#cluster_how_it_works): +> ... The second approach, Node clusters, should, in theory, give the best performance. In practice however, distribution tends to be very unbalanced due to operating system scheduler vagaries. Loads have been observed where over 70% of all connections ended up in just two processes, out of a total of eight ... + +* From the blog StrongLoop: +> ... Clustering is made possible with Node’s cluster module. This enables a master process to spawn worker processes and distribute incoming connections among the workers. However, rather than using this module directly, it’s far better to use one of the many tools out there that does it for you automatically; for example node-pm or cluster-service ... + +* From the Medium post [Node.js process load balance performance: comparing cluster module, iptables and Nginx](https://medium.com/@fermads/node-js-process-load-balancing-comparing-cluster-iptables-and-nginx-6746aaf38272): +> ... Node cluster is simple to implement and configure, things are kept inside Node’s realm without depending on other software. Just remember your master process will work almost as much as your worker processes and with a little less request rate then the other solutions ... \ No newline at end of file diff --git a/translations/spanish/sections/projectstructre/breakintcomponents.md new file mode 100644 index 000000000..65154d7d5 --- /dev/null +++ b/translations/spanish/sections/projectstructre/breakintcomponents.md @@ -0,0 +1,26 @@ +# Estructura tu solución en componentes + +

+ + +### One Paragraph Explainer + +For medium-sized apps and above, monoliths are really bad - one big piece of software with many dependencies is just hard to reason about and often leads to code spaghetti. Even those smart architects who are skilled enough to tame the beast and 'modularize' it spend great mental effort on design, and each change requires carefully evaluating the impact on other dependent objects. The ultimate solution is to develop small software: divide the whole stack into self-contained components that don't share files with others, each constituting very few files (e.g. API, service, data access, test, etc) so that it's very easy to reason about. Some may call this 'microservices' architecture - it's important to understand that microservices are not a spec which you must follow but rather a set of principles. You may adopt many principles into a full-blown microservices architecture or adopt only a few. Both are good as long as you keep the software complexity low. The very least you should do is create basic borders between components, assign a folder in your project root for each business component and make it self-contained - other components are allowed to consume its functionality only through its public interface or API. This is the foundation for keeping your components simple, avoiding dependency hell and paving the way to full-blown microservices in the future once your app grows + +

+ + +### Blog Quote: "Scaling requires scaling of the entire application" + From the blog MartinFowler.com + + > Monolithic applications can be successful, but increasingly people are feeling frustrations with them - especially as more applications are being deployed to the cloud . Change cycles are tied together - a change made to a small part of the application, requires the entire monolith to be rebuilt and deployed. Over time it's often hard to keep a good modular structure, making it harder to keep changes that ought to only affect one module within that module. Scaling requires scaling of the entire application rather than parts of it that require greater resource. + +

+ + ### Good: Structure your solution by self-contained components +![alt text](https://github.com/i0natan/nodebestpractices/blob/master/assets/images/structurebycomponents.PNG "Structuring solution by components") + +

+### Bad: Group your files by technical role +![alt text](https://github.com/i0natan/nodebestpractices/blob/master/assets/images/structurebyroles.PNG "Structuring solution by technical roles") diff --git a/translations/spanish/sections/projectstructre/configguide.md new file mode 100644 index 000000000..784e96e4d --- /dev/null +++ b/translations/spanish/sections/projectstructre/configguide.md @@ -0,0 +1,34 @@ +# Use environment-aware, secure and hierarchical config + +

+ + +### One Paragraph Explainer + +When dealing with configuration data, many things can just annoy and slow you down: (1) setting all the keys using process environment variables becomes very tedious when you need to inject 100 keys (instead of just committing those in a config file), however when dealing with files only, the DevOps admins cannot alter the behaviour without changing the code. A reliable config solution must combine configuration files plus overrides from the process variables; (2) when specifying all keys in a flat JSON, it becomes frustrating to find and modify entries when the list grows big. A hierarchical JSON file that is grouped into sections can overcome this issue, and a few config libraries allow storing the configuration in multiple files and take care of merging them all at runtime. See the example below; (3) storing sensitive information like a DB password is obviously not recommended, but no quick and handy solution exists for this challenge. Some configuration libraries allow encrypting files, others encrypt those entries during GIT commits, or simply don't store real values for those entries and specify the actual value during deployment via environment variables; (4) some advanced config scenarios demand injecting configuration values via the command line (vargs) or syncing configuration info via a centralized cache like Redis so different servers won't hold different data. + +Some configuration libraries can provide most of these features for free; have a look at NPM libraries like [nconf](https://www.npmjs.com/package/nconf) and [config](https://www.npmjs.com/package/config) which tick many of these requirements. + +

+ +### Code Example – hierarchical config helps to find entries and maintain huge config files + +```javascript +{ + // Customer module configs + "Customer": { + "dbConfig": { + "host": "localhost", + "port": 5984, + "dbName": "customers" + }, + "credit": { + "initialLimit": 100, + // Set low for development + "initialDays": 1 + } + } +} +``` + +
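+### Code example: reading the hierarchical config with nconf (illustrative sketch)
+
+A minimal sketch of how one of the libraries mentioned above, nconf, could combine command-line arguments, environment variables and the file shown above; the file path is illustrative:
+
+```javascript
+const nconf = require('nconf');
+
+//command-line args override environment variables, which override the config file
+nconf.argv()
+  .env()
+  .file({ file: 'config/customers.json' });
+
+//hierarchical keys are addressed with ':' separators
+const dbHost = nconf.get('Customer:dbConfig:host');
+const initialLimit = nconf.get('Customer:credit:initialLimit');
+```
+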

diff --git a/translations/spanish/sections/projectstructre/createlayers.md b/translations/spanish/sections/projectstructre/createlayers.md new file mode 100644 index 000000000..be55b3fc7 --- /dev/null +++ b/translations/spanish/sections/projectstructre/createlayers.md @@ -0,0 +1,11 @@ +# Layer your app, keep Express within its boundaries + +

+ + ### Separate component code into layers: web, services and DAL +![alt text](https://github.com/i0natan/nodebestpractices/blob/master/assets/images/structurebycomponents.PNG "Separate component code into layers") + +

+ +### 1 min explainer: The downside of mixing layers +![alt text](https://github.com/i0natan/nodebestpractices/blob/master/assets/images/keepexpressinweb.gif "The downside of mixing layers") diff --git a/translations/spanish/sections/projectstructre/separateexpress.md b/translations/spanish/sections/projectstructre/separateexpress.md new file mode 100644 index 000000000..389a7aa14 --- /dev/null +++ b/translations/spanish/sections/projectstructre/separateexpress.md @@ -0,0 +1,47 @@ +# Separate Express 'app' and 'server' + +

+ + +### One Paragraph Explainer + +The latest Express generator comes with a great practice that is worth keeping - the API declaration is separated from the network-related configuration (port, protocol, etc). This allows testing the API in-process, without performing network calls, with all the benefits that it brings to the table: fast testing execution and getting coverage metrics of the code. It also allows deploying the same API under flexible and different network conditions. Bonus: better separation of concerns and cleaner code + +
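+### Code example: exercising the API in-process with supertest (illustrative sketch)
+
+A minimal sketch of the in-process testing this separation enables, assuming a mocha-style runner and the supertest package; the route and assertion are illustrative:
+
+```javascript
+const request = require('supertest');
+const app = require('../app');   //the Express app only – no network listener is started
+
+describe('GET /api/events', () => {
+  it('responds with 200', (done) => {
+    request(app)
+      .get('/api/events')
+      .expect(200, done);
+  });
+});
+```
+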

+ +### Code example: API declaration, should reside in app.js + +```javascript +var app = express(); +app.use(bodyParser.json()); +app.use("/api/events", events.API); +app.use("/api/forms", forms); + +``` + +

+ +### Code example: Server network declaration, should reside in /bin/www + +```javascript +var app = require('../app'); +var http = require('http'); + +/** + * Get port from environment and store in Express. + */ + +var port = normalizePort(process.env.PORT || '3000'); +app.set('port', port); + +/** + * Create HTTP server. + */ + +var server = http.createServer(app); + +``` + + +### Example: test your API in-process using supertest (popular testing package) +![alt text](https://github.com/i0natan/nodebestpractices/blob/master/assets/images/supertestinprocess.PNG "In process testing with Supertest") diff --git a/translations/spanish/sections/projectstructre/thincomponents.md b/translations/spanish/sections/projectstructre/thincomponents.md new file mode 100644 index 000000000..8b61a0438 --- /dev/null +++ b/translations/spanish/sections/projectstructre/thincomponents.md @@ -0,0 +1,26 @@ +# Structure your solution by components + +

+ + +### One Paragraph Explainer + +For medium-sized apps and above, monoliths are really bad - one big piece of software with many dependencies is just hard to reason about and often leads to code spaghetti. Even those smart architects who are skilled enough to tame the beast and 'modularize' it spend great mental effort on design, and each change requires carefully evaluating the impact on other dependent objects. The ultimate solution is to develop small software: divide the whole stack into self-contained components that don't share files with others, each constituting very few files (e.g. API, service, data access, test, etc) so that it's very easy to reason about. Some may call this 'microservices' architecture - it's important to understand that microservices are not a spec which you must follow but rather a set of principles. You may adopt many principles into a full-blown microservices architecture or adopt only a few. Both are good as long as you keep the software complexity low. The very least you should do is create basic borders between components, assign a folder in your project root for each business component and make it self-contained - other components are allowed to consume its functionality only through its public interface or API. This is the foundation for keeping your components simple, avoiding dependency hell and paving the way to full-blown microservices in the future once your app grows + +

+ + +### Blog Quote: "Scaling requires scaling of the entire application" + From the blog MartinFowler.com + + > Monolithic applications can be successful, but increasingly people are feeling frustrations with them - especially as more applications are being deployed to the cloud . Change cycles are tied together - a change made to a small part of the application, requires the entire monolith to be rebuilt and deployed. Over time it's often hard to keep a good modular structure, making it harder to keep changes that ought to only affect one module within that module. Scaling requires scaling of the entire application rather than parts of it that require greater resource. + +

+ + ### Good: Structure your solution by self-contained components +![alt text](https://github.com/i0natan/nodebestpractices/blob/master/assets/images/structurebycomponents.PNG "Structuring solution by components") + +

+ +### Bad: Group your files by technical role +![alt text](https://github.com/i0natan/nodebestpractices/blob/master/assets/images/structurebyroles.PNG "Structuring solution by technical roles") diff --git a/translations/spanish/sections/projectstructre/wraputilities.md b/translations/spanish/sections/projectstructre/wraputilities.md new file mode 100644 index 000000000..6d47e6050 --- /dev/null +++ b/translations/spanish/sections/projectstructre/wraputilities.md @@ -0,0 +1,14 @@ +# Wrap common utilities as NPM packages + +

+ + +### One Paragraph Explainer +Once you start growing and have different components on different servers which consume similar utilities, you should start managing the dependencies - how can you keep 1 copy of your utility code and let multiple consumer components use and deploy it? Well, there is a framework for that, it's called NPM... Start by wrapping 3rd party utility packages with your own code to make them easily replaceable in the future, and publish your own code as a private NPM package. Now, all your code base can import that code and benefit from a free dependency management framework. It's possible to publish NPM packages for your own private use without sharing them publicly, using [private modules](https://docs.npmjs.com/private-modules/intro), [private registry](https://npme.npmjs.com/docs/tutorials/npm-enterprise-with-nexus.html) or [local NPM packages](https://medium.com/@arnaudrinquin/build-modular-application-with-npm-local-modules-dfc5ff047bcc) + + +
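+### Code example: publishing a utility as a private package (illustrative sketch)
+
+A minimal sketch of what the wrapped utility's manifest could look like when published to a private registry; the scope, package name and registry URL are placeholders:
+
+```javascript
+//package.json of the shared utility
+{
+  "name": "@my-org/logger-wrapper",
+  "version": "1.0.0",
+  "main": "index.js",
+  "publishConfig": {
+    "registry": "https://npm.my-org.internal"   //placeholder private registry
+  }
+}
+```
+
+Consumers then simply `npm install @my-org/logger-wrapper` like any other dependency.
+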

+ + + ### Sharing your own common utilities across environments and components +![alt text](https://github.com/i0natan/nodebestpractices/blob/master/assets/images/Privatenpm.png "Structuring solution by components") diff --git a/translations/spanish/sections/template.md b/translations/spanish/sections/template.md new file mode 100644 index 000000000..22ca4a553 --- /dev/null +++ b/translations/spanish/sections/template.md @@ -0,0 +1,40 @@ +# Title here + +

+ + +### One Paragraph Explainer + +Text + +

+ + +### Code Example – explanation + +```javascript +code here +``` + +

+ +### Code Example – another + +```javascript +code here +``` + +

+ +### Blog Quote: "Title" + From the blog pouchdb.com, ranked 11 for the keywords “Node Promises” + + > …text here + +

+ + ### Image title +![alt text](https://github.com/i0natan/nodebestpractices/blob/master/assets/images/swaggerDoc.png "API error handling") + + +

diff --git a/translations/spanish/sections/testingandquality/bumpversion.md b/translations/spanish/sections/testingandquality/bumpversion.md new file mode 100644 index 000000000..29216b336 --- /dev/null +++ b/translations/spanish/sections/testingandquality/bumpversion.md @@ -0,0 +1,30 @@ +# Title here + + +### One Paragraph Explainer + +Text + + +### Code Example – explanation + +```javascript +code here +``` + +### Code Example – another + +```javascript +code here +``` + +### Blog Quote: "Title" + From the blog pouchdb.com, ranked 11 for the keywords “Node Promises” + + > …text here + + ### Image title +![alt text](https://github.com/i0natan/nodebestpractices/blob/master/assets/images/swaggerDoc.png "API error handling") + + + From cf9f9f4fbf58fb29d9f7fed1f48605d73b0965c8 Mon Sep 17 00:00:00 2001 From: Eduardo Montalvo Date: Fri, 3 Nov 2017 14:11:55 -0600 Subject: [PATCH 2/7] Remove draft files --- .../projectstructre/breakintcomponents.md | 2 +- .../sections/drafts/readme-general-toc-1.md | 84 ------------- .../sections/drafts/readme-general-toc-2.md | 78 ------------ .../sections/drafts/readme-general-toc-3.md | 82 ------------- .../sections/drafts/readme-general-toc-4.md | 115 ------------------ 5 files changed, 1 insertion(+), 360 deletions(-) delete mode 100644 translations/spanish/sections/drafts/readme-general-toc-1.md delete mode 100644 translations/spanish/sections/drafts/readme-general-toc-2.md delete mode 100644 translations/spanish/sections/drafts/readme-general-toc-3.md delete mode 100644 translations/spanish/sections/drafts/readme-general-toc-4.md diff --git a/sections/projectstructre/breakintcomponents.md b/sections/projectstructre/breakintcomponents.md index 8b61a0438..f2a57a74f 100644 --- a/sections/projectstructre/breakintcomponents.md +++ b/sections/projectstructre/breakintcomponents.md @@ -1,4 +1,4 @@ -# Structure your solution by components +# Estructura tu solución en componentes Structure your solution by components

diff --git a/translations/spanish/sections/drafts/readme-general-toc-1.md b/translations/spanish/sections/drafts/readme-general-toc-1.md deleted file mode 100644 index 6590b0f40..000000000 --- a/translations/spanish/sections/drafts/readme-general-toc-1.md +++ /dev/null @@ -1,84 +0,0 @@ - - -

- Node.js Best Practices -

- -53 items Last update: 7 days ago Updated for Node v.8.4 - - - -# Welcome to Node.js Best Practices - -Welcome to the biggest compilation of Node.JS best practices. The content below was gathered from all top ranked books and posts and is updated constantly - when you read here rest assure that no significant tip slipped away. Feel at home - we love to discuss via PRs, issues or Gitter. - -## Table of Contents -* [Project Setup Practices (18)](#project-setup-practices) -* [Code Style Practices (11) ](#code-style-practices) -* [Error Handling Practices (14) ](#error-handling-practices) -* [Going To Production Practices (21) ](#going-to-production-practices) -* [Testing Practices (9) ](#deployment-practices) -* [Security Practices (8) ](#security-practices) - - -

-# `Project Setup Practices` - -## ✔ 1. Structure your solution by feature ('microservices') - -**TL&DR:** The worst large applications pitfal is a huge code base with hundreds of dependencies that slow down they developers as they try to incorporate new features. Partioning into small units ensures that each unit is kept simple and easy to maintain. This strategy pushes the complexity to the higher level - designing the cross-component interactions. - -**Otherwise:** Developing a new feature with a change to few objects demands to evaluate how this changes might affect dozends of dependants and ach deployment becomes a fear. - -🔗 [**Read More: Structure by feature*](/sections/errorhandling/asyncawait.md) - -

- -## ✔ 2. Layer your app, keep Express within its boundaries - -**TL&DR:** It's very common to see Express API passes the express objects (req, res) to business logic and data layers, sometimes even to every function - this makes your application depedant on and accessible by Express only. What if your code should be reached by testing console or CRON job? instead create your own context object with cross-cutting-concern properties like the user roles and inject into other layers, or use 'thread-level variables' libraries like continuation local storage - -**Otherwise:** Application can be accessed by Express only and require to create complex testing mocks - -🔗 [**Read More: Structure by feature*](/sections/errorhandling/asyncawait.md) - -

- -## ✔ 3. Configure ESLint with node-specific plugins - -**TL&DR:** Monitoring is a game of finding out issues before our customers do – obviously this should be assigned unprecedented importance. The market is overwhelmed with offers thus consider starting with defining the basic metrics you must follow (my sug - -**Otherwise:** You end-up with a blackbox that is hard to reason about, then you start re-writing all logging statements to add additional information - -🔗 [**Read More: Structure by feature*](/sections/errorhandling/asyncawait.md) - - -


-# `Code Style Practices` - - -


-# `Error Handling Practices` -

⬆ Return to top

- -## ✔ Use async-await for async error handling - -* **TL;DR:** Handling async errors in callback style is probably the fastest way to hell (a.k.a the pyramid of doom). The best gift you can give to your code is using instead a reputable promise library or async-await which provides much compact and familiar code syntax like try-catch - -* **Otherwise:** Node.JS callback style, function(err, response), is a promising way to un-maintainable code due to the mix of error handling with casual code, excessive nesting and awkward coding patterns - -🔗 [**Use async-await for async error handling**](/sections/errorhandling/asyncawait.md) - - - -


-# `Going To Production Practices` - - -


-# `Deployment Practices` - - -


-# `Security Practices` - diff --git a/translations/spanish/sections/drafts/readme-general-toc-2.md b/translations/spanish/sections/drafts/readme-general-toc-2.md deleted file mode 100644 index de2701138..000000000 --- a/translations/spanish/sections/drafts/readme-general-toc-2.md +++ /dev/null @@ -1,78 +0,0 @@ -# Node.JS Best Practices - -53 items Last update: 7 days ago Updated for Node v.8.4 - -![Node.js Best Practices](assets/images/banner-1.png) - -# Welcome to Node.js Best Practices - -Welcome to the biggest compilation of Node.JS best practices. The content below was gathered from all top ranked books and posts and is updated constantly - when you read here rest assure that no significant tip slipped away. Feel at home - we love to discuss via PRs, issues or Gitter. - -## Table of Contents -* [Project Setup Practices (18)](#project-setup-practices) -* [Code Style Practices (11) ](#code-style-practices) -* [Error Handling Practices (14) ](#error-handling-practices) -* [Going To Production Practices (21) ](#going-to-production-practices) -* [Testing Practices (9) ](#deployment-practices) -* [Security Practices (8) ](#security-practices) - -

-# `Project Setup Practices` - -## ✔ 1. Structure your solution by feature ('microservices') - -**TL&DR:** The worst large applications pitfal is a huge code base where hundreds of dependencies slow down developers as try to incorporate new features. Partioning into small units ensures that each unit is kept simple and very easy to maintain. This strategy pushes the complexity to the higher level - designing the cross-component interactions. - -**Otherwise:** Developing a new feature with a change to few objects demands to evaluate how this changes might affect dozends of dependants and ach deployment becomes a fear. - -🔗 [**Read More: Structure by feature*](/sections/errorhandling/asyncawait.md) - -
- -## ✔ 2. Layer your app, keep Express within its boundaries - -**TL&DR:** It's very common to see Express API passes the express objects (req, res) to business logic and data layers, sometimes even to every function - this makes your application depedant on and accessible by Express only. What if your code should be reached by testing console or CRON job? instead create your own context object with cross-cutting-concern properties like the user roles and inject into other layers, or use 'thread-level variables' libraries like continuation local storage - -**Otherwise:** Application can be accessed by Express only and require to create complex testing mocks - -🔗 [**Read More: Structure by feature*](/sections/errorhandling/asyncawait.md) - -

- -## ✔ 3. Configure ESLint with node-specific plugins - -**TL&DR:** Monitoring is a game of finding out issues before our customers do – obviously this should be assigned unprecedented importance. The market is overwhelmed with offers thus consider starting with defining the basic metrics you must follow (my sug - -**Otherwise:** You end-up with a blackbox that is hard to reason about, then you start re-writing all logging statements to add additional information - -🔗 [**Read More: Structure by feature*](/sections/errorhandling/asyncawait.md) - -


-# `Code Style Practices` - - -


-# `Error Handling Practices` -

⬆ Return to top

- -## ✔ Use async-await for async error handling - -**TL;DR:** Handling async errors in callback style is probably the fastest way to hell (a.k.a the pyramid of doom). The best gift you can give to your code is using instead a reputable promise library or async-await which provides much compact and familiar code syntax like try-catch - -**Otherwise:** Node.JS callback style, function(err, response), is a promising way to un-maintainable code due to the mix of error handling with casual code, excessive nesting and awkward coding patterns - -🔗 [**Use async-await for async error handling**](/sections/errorhandling/asyncawait.md) - - - -


-# `Going To Production Practices` - - -


-# `Deployment Practices` - - -


-# `Security Practices` - diff --git a/translations/spanish/sections/drafts/readme-general-toc-3.md b/translations/spanish/sections/drafts/readme-general-toc-3.md deleted file mode 100644 index d6ec6386a..000000000 --- a/translations/spanish/sections/drafts/readme-general-toc-3.md +++ /dev/null @@ -1,82 +0,0 @@ - - -

- Node.js Best Practices -

- -53 items Last update: 7 days ago Updated for Node v.8.4 - - - -# Welcome to Node.js Best Practices - -Welcome to the biggest compilation of Node.JS best practices, based on our check it's also the largest collection on any programming language (more than 53 items). The content below was gathered from all top ranked books and posts and is updated constantly - if you read here you can rest assure that no significant tip slipped away. Feel at home - we love to discuss via PRs, issues or Gitter. -

-## Table of Contents -* [Project Setup Practices (18)](#project-setup-practices) -* [Code Style Practices (11) ](#code-style-practices) -* [Error Handling Practices (14) ](#error-handling-practices) -* [Going To Production Practices (21) ](#going-to-production-practices) -* [Testing Practices (9) ](#deployment-practices) -* [Security Practices (8) ](#security-practices) - -

-# `Project Setup Practices` - -## ✔ 1. Structure your solution by feature ('microservices') - -**TL&DR:** The worst large applications pitfal is a huge code base where hundreds of dependencies slow down developers as try to incorporate new features. Partioning into small units ensures that each unit is kept simple and very easy to maintain. This strategy pushes the complexity to the higher level - designing the cross-component interactions. - -**Otherwise:** Developing a new feature with a change to few objects demands to evaluate how this changes might affect dozends of dependants and ach deployment becomes a fear. - -

- -## ✔ 2. Layer your app, keep Express within its boundaries - -**TL&DR:** It's very common to see Express API passes the express objects (req, res) to business logic and data layers, sometimes even to every function - this makes your application depedant on and accessible by Express only. What if your code should be reached by testing console or CRON job? instead create your own context object with cross-cutting-concern properties like the user roles and inject into other layers, or use 'thread-level variables' libraries like continuation local storage - -**Otherwise:** Application can be accessed by Express only and require to create complex testing mocks - -🔗 [**Read More: Structure by feature*](/sections/errorhandling/asyncawait.md) - - -

- -## ✔ 3. Configure ESLint with node-specific plugins - -**TL&DR:** Monitoring is a game of finding out issues before our customers do – obviously this should be assigned unprecedented importance. The market is overwhelmed with offers thus consider starting with defining the basic metrics you must follow (my sug - -**Otherwise:** You end-up with a blackbox that is hard to reason about, then you start re-writing all logging statements to add additional information - -🔗 [**Read More: Structure by feature*](/sections/errorhandling/asyncawait.md) - - -


-# `Code Style Practices` - - -


-# `Error Handling Practices` -

⬆ Return to top

- -## ✔ Use async-await for async error handling - -**TL;DR:** Handling async errors in callback style is probably the fastest way to hell (a.k.a the pyramid of doom). The best gift you can give to your code is using instead a reputable promise library or async-await which provides much compact and familiar code syntax like try-catch - -**Otherwise:** Node.JS callback style, function(err, response), is a promising way to un-maintainable code due to the mix of error handling with casual code, excessive nesting and awkward coding patterns - -🔗 [**Use async-await for async error handling**](/sections/errorhandling/asyncawait.md) - - - -


-# `Going To Production Practices` - - -


-# `Deployment Practices` - - -


-# `Security Practices` - diff --git a/translations/spanish/sections/drafts/readme-general-toc-4.md b/translations/spanish/sections/drafts/readme-general-toc-4.md deleted file mode 100644 index 987b0084c..000000000 --- a/translations/spanish/sections/drafts/readme-general-toc-4.md +++ /dev/null @@ -1,115 +0,0 @@ - - -

- Node.js Best Practices -

- -53 items Last update: 7 days ago Updated for Node v.8.4 - - - -# Welcome to Node.js Best Practices - -Welcome to the biggest compilation of Node.JS best practices, based on our check it's also the largest collection on any programming language (more than 53 items). The content below was gathered from all top ranked books and posts and is updated constantly - if you read here you can rest assure that no significant tip slipped away. Feel at home - we love to discuss via PRs, issues or Gitter. - -## Table of Contents -* [Project Setup Practices (18)](#project-setup-practices) -* [Code Style Practices (11) ](#code-style-practices) -* [Error Handling Practices (14) ](#error-handling-practices) -* [Going To Production Practices (21) ](#going-to-production-practices) -* [Testing Practices (9) ](#deployment-practices) -* [Security Practices (8) ](#security-practices) - -

-# `Project Setup Practices` - -## ![](assets/images/checkbox-sm.png) 1. Structure your solution by feature ('microservices') - -**TL&DR:** The worst large applications pitfal is a huge code base where hundreds of dependencies slow down developers as try to incorporate new features. Partioning into small units ensures that each unit is kept simple and very easy to maintain. This strategy pushes the complexity to the higher level - designing the cross-component interactions. - -**Otherwise:** Developing a new feature with a change to few objects demands to evaluate how this changes might affect dozends of dependants and ach deployment becomes a fear. - -🔗 [**Read More: Structure by feature*](/sections/errorhandling/asyncawait.md) - -

- -## ![](assets/images/checkbox-sm.png) 2. Layer your app, keep Express within its boundaries - -**TL&DR:** It's very common to see Express API passes the express objects (req, res) to business logic and data layers, sometimes even to every function - this makes your application depedant on and accessible by Express only. What if your code should be reached by testing console or CRON job? instead create your own context object with cross-cutting-concern properties like the user roles and inject into other layers, or use 'thread-level variables' libraries like continuation local storage - -**Otherwise:** Application can be accesses by Express only and require to create complex testing mocks - -🔗 [**Read More: Structure by feature*](/sections/errorhandling/asyncawait.md) - -

- -## ![](assets/images/checkbox-sm.png) 3. Configure ESLint with node-specific plugins - -**TL&DR:** Monitoring is a game of finding out issues before our customers do – obviously this should be assigned unprecedented importance. The market is overwhelmed with offers thus consider starting with defining the basic metrics you must follow (my sug - -**Otherwise:** You end-up with a blackbox that is hard to reason about, then you start re-writing all logging statements to add additional information - -🔗 [**Read More: Structure by feature*](/sections/errorhandling/asyncawait.md) - - - -

- -## Additional 15 bullets will appear here - -


-# `Code Style Practices` - -## ![](assets/images/checkbox-sm.png) 1. Use async-await - -**TL&DR:** Monitoring is a game of finding out issues before our customers do – obviously this should be assigned unprecedented importance. The market is overwhelmed with offers thus consider starting with defining the basic metrics you must follow (my sug - -**Otherwise:** You end-up with a blackbox that is hard to reason about, then you start re-writing all logging statements to add additional information - -🔗 [**Read More: Structure by feature*](/sections/errorhandling/asyncawait.md) - -

- -## ![](assets/images/checkbox-sm.png) 2. Break into small classes or objects - -**TL&DR:** Monitoring is a game of finding out issues before our customers do – obviously this should be assigned unprecedented importance. The market is overwhelmed with offers thus consider starting with defining the basic metrics you must follow (my sug - -**Otherwise:** You end-up with a blackbox that is hard to reason about, then you start re-writing all logging statements to add additional information - -🔗 [**Read More: Structure by feature*](/sections/errorhandling/asyncawait.md) - -


-# `Error Handling Practices` -

⬆ Return to top

- -## Use async-await for async error handling - -**TL;DR:** Handling async errors in callback style is probably the fastest way to hell (a.k.a the pyramid of doom). The best gift you can give to your code is using instead a reputable promise library or async-await which provides much compact and familiar code syntax like try-catch - -**Otherwise:** Node.JS callback style, function(err, response), is a promising way to un-maintainable code due to the mix of error handling with casual code, excessive nesting and awkward coding patterns - -🔗 [**Use async-await for async error handling**](/sections/errorhandling/asyncawait.md) - -

- -## Use async-await for async error handling - -**TL;DR:** Handling async errors in callback style is probably the fastest way to hell (a.k.a the pyramid of doom). The best gift you can give to your code is using instead a reputable promise library or async-await which provides much compact and familiar code syntax like try-catch - -**Otherwise:** Node.JS callback style, function(err, response), is a promising way to un-maintainable code due to the mix of error handling with casual code, excessive nesting and awkward coding patterns - -🔗 [**Use async-await for async error handling**](/sections/errorhandling/asyncawait.md) - - - -


-# `Going To Production Practices` - - -


-# `Deployment Practices` - - -


-# `Security Practices` - From 0bde8a74a6efe1c5df63a6c3c34fe2e6fe5d14a3 Mon Sep 17 00:00:00 2001 From: Eduardo Montalvo Date: Fri, 3 Nov 2017 16:01:11 -0600 Subject: [PATCH 3/7] Add spanish translation for project structure practices --- sections/projectstructre/breakintcomponents.md | 15 +++++++-------- sections/projectstructre/configguide.md | 10 +++++----- sections/projectstructre/createlayers.md | 6 +++--- sections/projectstructre/separateexpress.md | 18 ++++++------------ sections/projectstructre/wraputilities.md | 11 ++++------- 5 files changed, 25 insertions(+), 35 deletions(-) diff --git a/sections/projectstructre/breakintcomponents.md b/sections/projectstructre/breakintcomponents.md index f2a57a74f..7b128d2c0 100644 --- a/sections/projectstructre/breakintcomponents.md +++ b/sections/projectstructre/breakintcomponents.md @@ -2,25 +2,24 @@

+### Un párrafo explicativo -### One Paragraph Explainer - -For medium sized apps and above, monoliths are really bad - a one big software with many dependencies is just hard to reason about and often lead to code spaghetti. Even those smart architects who are skilled to tame the beast and 'modulurize' it - spend great mental effort on design and each change requires to carefully evaluate the impact on other dependant objects. The ultimate solution is to develop small software: divide the whole stack into self-contained components that don't share files with others, each constitute very few files (e.g. API, service, data access, test, etc) so that it's very easy to reason about it. Some may call this 'microservices' architecture - it's important to understand that microservices is not a spec which you must follow rather a set of principles. You may adopt many principles into a full-blown microservices architecture or adopt only few. Both are good as long as you keep the software complexity low. The very least you should do is create a basic borders between components, assign a folder in your project root for each business component and make it self contained - other components are allowed to consumeits functionality only through its public interface or API. This is the foundation for keeping your components simple, avoid dependencies hell and pave the way to full-blown microservices in the future once your app grows +Para aplicaciones medianas y superiores, los monolitos son realmente malos: un software grande con muchas dependencias es simplemente difícil de entender y a menudo conduce a código espagueti. Incluso aquellos arquitectos inteligentes que están capacitados para domesticar a la bestia y "modularizarla" dedican un gran esfuerzo mental al diseño, y cada cambio requiere evaluar cuidadosamente el impacto en otros objetos dependientes. La solución definitiva es desarrollar software pequeño: divide el stack completo en componentes independientes que no compartan archivos con otros, cada componente constituye muy pocos archivos (por ejemplo, API, servicio, acceso a datos, test, etc.) para que sea muy fácil de entender. Algunos pueden llamar a esto 'arquitectura de microservicios': es importante entender que los microservicios no son una especificación que debas seguir sino un conjunto de principios. Puedes adoptar muchos principios en una arquitectura de microservicios en toda regla o adoptar solo unos pocos. Ambos son buenos siempre y cuando mantengas baja la complejidad del software. Lo mínimo que debes hacer es crear una frontera básica entre los componentes, asignar una carpeta en la raíz del proyecto para cada componente de negocio y hacerlo autónomo: otros componentes pueden consumir su funcionalidad solo a través de su interfaz pública o API. Esta es la base para mantener tus componentes simples, evitar el infierno de dependencias y allanar el camino a los microservicios en el futuro una vez que tu aplicación crezca
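A modo de ilustración, un esbozo mínimo (con nombres de carpetas y funciones hipotéticos) de cómo un componente puede exponer únicamente su interfaz pública a través de un punto de entrada:

```javascript
// components/orders/index.js – único punto de entrada hipotético del componente "orders"
// Los demás componentes consumen esta interfaz pública y nunca los archivos internos (servicio, DAL, tests)
const ordersService = require('./ordersService');

module.exports = {
  addOrder: (order) => ordersService.addOrder(order),
  getOrderById: (orderId) => ordersService.getOrderById(orderId)
};
```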

-### Blog Quote: "Scaling requires scaling of the entire application" - From the blog MartinFowler.com +### Cita de Blog: "El escalado requiere escalar toda la aplicación" + Del blog MartinFowler.com - > Monolithic applications can be successful, but increasingly people are feeling frustrations with them - especially as more applications are being deployed to the cloud . Change cycles are tied together - a change made to a small part of the application, requires the entire monolith to be rebuilt and deployed. Over time it's often hard to keep a good modular structure, making it harder to keep changes that ought to only affect one module within that module. Scaling requires scaling of the entire application rather than parts of it that require greater resource. + > Las aplicaciones monolíticas pueden ser exitosas, pero cada vez más personas sienten frustraciones con ellas, especialmente a medida que se implementan más aplicaciones en la nube. Los ciclos de cambio están unidos: un cambio realizado en una pequeña parte de la aplicación requiere que se reconstruya y despliegue todo el monolito. Con el tiempo, a menudo es difícil mantener una buena estructura modular, lo que hace más difícil mantener los cambios que solo deberían afectar a un módulo dentro de ese módulo. El escalado requiere escalar toda la aplicación en lugar de las partes de ella que requieren más recursos.

- ### Good: Structure your solution by self-contained components + ### Bien: Estructura tu solución en componentes autónomos ![alt text](https://github.com/i0natan/nodebestpractices/blob/master/assets/images/structurebycomponents.PNG "Structuring solution by components")

-### Bad: Group your files by technical role +### Mal: Agrupa tus archivos por rol técnico ![alt text](https://github.com/i0natan/nodebestpractices/blob/master/assets/images/structurebyroles.PNG "Structuring solution by technical roles") diff --git a/sections/projectstructre/configguide.md b/sections/projectstructre/configguide.md index 784e96e4d..e6a040ce5 100644 --- a/sections/projectstructre/configguide.md +++ b/sections/projectstructre/configguide.md @@ -1,17 +1,17 @@ -# Use environment aware, secure and hirearchical config +# Usar una configuración segura, jerárquica y consciente del entorno

-### One Paragraph Explainer +### Un párrafo explicativo -When dealing with configuration data, many things can just annoy and slow down: (1) setting all the keys using process environment variables becomes very tedious when in need to inject 100 keys (instead of just committing those in a config file), however when dealing with files only the devops admins can not alter the behaviour without changing the code. A reliable config solution must combine both configuration files + overrides from the process variables (b) when specifying all keys in a flat JSON, it become frustrating to find and modify entries when the list grows big. An hirearchical JSON files that is grouped into section can overcome this issue + few config libraries allows to store the configuration in multiple files and take care to union all in runtime. See example below (3) storing sensitive information like DB password is obviously not recommended but no quick and handy solution exists for this challenge. Some configuraiton library allows to encrypt files, others encrypt those entries during GIT commits or simple don't store real values for those entries and specify the actual value during deployment via environment variables. (4) some advanced config scenario demand to inject configuration value via command line (vargs) or sync configuration info via centralized cache like Redis so different servers won't hold different data. +Cuando se trata de datos de configuración, muchas cosas pueden molestar y ralentizar: (1) configurar todas las claves utilizando variables de entorno de proceso resulta muy tedioso cuando se necesitan inyectar 100 claves (en lugar de solo confirmarlas en un archivo de configuración), pero cuando tratas solo con archivos los administradores de devops no pueden alterar el comportamiento sin cambiar el código. Una solución de configuración confiable debe combinar los archivos de configuración y las sobrescrituras desde las variables de proceso. (2) Al especificar todas las claves en un JSON plano, resulta frustrante encontrar y modificar entradas cuando la lista crece. Un archivo JSON jerárquico agrupado en secciones puede resolver este problema; además, algunas bibliotecas de configuración permiten almacenar la configuración en múltiples archivos y se encargan de unirlas en tiempo de ejecución. Ve el ejemplo a continuación. (3) Almacenar información confidencial como la contraseña de la base de datos obviamente no se recomienda, pero no existe una solución rápida y práctica para este desafío. Algunas bibliotecas de configuración permiten cifrar archivos, otras cifran esas entradas durante los commits de GIT o simplemente no almacenan valores reales para esas entradas y especifican el valor real durante la implementación a través de variables de entorno. (4) Algunos escenarios de configuración avanzada exigen inyectar el valor de configuración a través de la línea de comando (vargs) o sincronizar la información de configuración a través de una caché centralizada como Redis para que los diferentes servidores no contengan datos diferentes. -Some configuration libraries can provide most of these features for free, have a look at NPM libraries like [nconf](https://www.npmjs.com/package/nconf) and [config](https://www.npmjs.com/package/config) which tick many of these requirements. 
+Algunas bibliotecas de configuración pueden proporcionar la mayoría de estas características de forma gratuita, echa un vistazo a las bibliotecas de NPM como [nconf](https://www.npmjs.com/package/nconf) y [config](https://www.npmjs.com/package/config) que cumplen con muchos de estos requisitos.
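A modo de ilustración, un esbozo mínimo con nconf que combina argumentos de línea de comandos, variables de entorno y un archivo jerárquico (la ruta config.json y las claves son supuestos):

```javascript
const nconf = require('nconf');

// Prioridad: argumentos de línea de comandos > variables de entorno > archivo de configuración
nconf.argv()
  .env()
  .file({ file: 'config.json' }); // ruta hipotética del archivo jerárquico

// Las claves jerárquicas se consultan con ':' como separador
const dbHost = nconf.get('database:host');
const dbPort = nconf.get('database:port');
console.log(`Base de datos en ${dbHost}:${dbPort}`);
```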

-### Code Example – hirearchical config helps to find entries and maintain huge config files +### Ejemplo de código – la configuración jerárquica ayuda a encontrar entradas y mantener enormes archivos de configuración ```javascript { diff --git a/sections/projectstructre/createlayers.md b/sections/projectstructre/createlayers.md index be55b3fc7..63108fa3f 100644 --- a/sections/projectstructre/createlayers.md +++ b/sections/projectstructre/createlayers.md @@ -1,11 +1,11 @@ -# Layer your app, keep Express within its boundaries +# Aplica capas a tus componentes, mantén Express dentro de sus límites

- ### Separate component code into layers: web, services and DAL + ### Separa el código del componente en capas: web, servicios y DAL (capa de acceso a datos) ![alt text](https://github.com/i0natan/nodebestpractices/blob/master/assets/images/structurebycomponents.PNG "Separate component code into layers")

-### 1 min explainer: The downside of mixing layers +### Explicación de 1 minuto: La desventaja de mezclar capas ![alt text](https://github.com/i0natan/nodebestpractices/blob/master/assets/images/keepexpressinweb.gif "The downside of mixing layers") diff --git a/sections/projectstructre/separateexpress.md b/sections/projectstructre/separateexpress.md index 389a7aa14..fd82eb831 100644 --- a/sections/projectstructre/separateexpress.md +++ b/sections/projectstructre/separateexpress.md @@ -1,16 +1,14 @@ -# Separate Express 'app' and 'server' +# Separar 'servidor' y 'aplicación' de Express

+### Un párrafo explicativo -### One Paragraph Explainer - -The latest Express generator comes with a great practice that is worth to keep - the API declaration is separated from the network related configuration (port, protocol, etc). This allows testing the API in-process, without performing network calls, with all the benefits that it brings to the table: fast testing execution and getting coverage metrics of the code. It also allows deploying the same API under flexible and different network conditions. Bonus: better separation of concerns and cleaner code +El último generador de Express viene con una gran práctica que vale la pena mantener: la declaración del API está separada de la configuración relacionada con la red (puerto, protocolo, etc.). Esto permite probar el API en proceso, sin realizar llamadas de red, con todos los beneficios que eso aporta: ejecución rápida de las pruebas y obtención de métricas de cobertura del código. También permite desplegar la misma API bajo condiciones de red flexibles y diferentes. Bonificación: mejor separación de responsabilidades y código más limpio

-### Code example: API declaration, should reside in app.js - +### Ejemplo de código: Declaración del API, debe residir en app.js ```javascript var app = express(); app.use(bodyParser.json()); @@ -18,11 +16,9 @@ app.use("/api/events", events.API); app.use("/api/forms", forms); ``` -

-### Code example: Server network declaration, should reside in /bin/www - +### Ejemplo de código: Declaración de red del servidor, debe residir en /bin/www ```javascript var app = require('../app'); var http = require('http'); @@ -41,7 +37,5 @@ app.set('port', port); var server = http.createServer(app); ``` - - -### Example: test your API in-process using supertest (popular testing package) +### Ejemplo: prueba tu API en proceso usando supertest (paquete de testing popular) ![alt text](https://github.com/i0natan/nodebestpractices/blob/master/assets/images/supertestinprocess.PNG "In process testing with Supertest") diff --git a/sections/projectstructre/wraputilities.md b/sections/projectstructre/wraputilities.md index 6d47e6050..7e4207b7c 100644 --- a/sections/projectstructre/wraputilities.md +++ b/sections/projectstructre/wraputilities.md @@ -1,14 +1,11 @@ -# Wrap common utilities as NPM packages +# Envuelve las utilidades comunes como paquetes de NPM

- -### One Paragraph Explainer -Once you start growing and have different components on different servers which consumes similar utilities, you should start managing the dependencies - how can you keep 1 copy of your utility code and let multiple consumer components use and deploy it? well, there is a framework for that, it's called NPM... Start by wrapping 3rd party utility packages with your own code to make it easily replaceable in the future and publish your own code as private NPM package. Now, all your code base can import that code and benefit free dependency management framework. It's possible to publish NPM packages for your own private use without sharing it publicly using [private modules](https://docs.npmjs.com/private-modules/intro), [private registry](https://npme.npmjs.com/docs/tutorials/npm-enterprise-with-nexus.html) or [local NPM packages](https://medium.com/@arnaudrinquin/build-modular-application-with-npm-local-modules-dfc5ff047bcc) - +### Un párrafo explicativo +Una vez que comienzas a crecer y tienes diferentes componentes en diferentes servidores que consumen utilidades similares, debes comenzar a administrar las dependencias: ¿cómo puedes conservar 1 copia de tu código de utilidad y permitir que múltiples componentes consumidores lo usen y lo desplieguen? Bueno, hay un framework para eso, se llama NPM... Comienza envolviendo paquetes de utilidad de terceros con tu propio código para que sea fácilmente reemplazable en el futuro y publica tu propio código como paquete privado de NPM. Ahora, toda tu base de código puede importar ese código y beneficiarse del framework de gestión de dependencias gratuito. Es posible publicar paquetes de NPM para tu propio uso privado sin compartirlos públicamente utilizando [módulos privados](https://docs.npmjs.com/private-modules/intro), [registro privado](https://npme.npmjs.com/docs/tutorials/npm-enterprise-with-nexus.html) o [paquetes locales de NPM](https://medium.com/@arnaudrinquin/build-modular-application-with-npm-local-modules-dfc5ff047bcc)
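Como ilustración, un esbozo mínimo de cómo envolver una utilidad de terceros (aquí winston, como supuesto) en un paquete privado propio, de modo que sea fácil de reemplazar más adelante:

```javascript
// index.js de un paquete privado hipotético, p. ej. @mi-empresa/logger, publicado en un registro privado
const winston = require('winston');

const logger = winston.createLogger({
  transports: [new winston.transports.Console()]
});

// Se expone una interfaz propia y estable; los consumidores nunca dependen de winston directamente
module.exports = {
  info: (message) => logger.info(message),
  error: (message) => logger.error(message)
};
```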

- - ### Sharing your own common utilities across environments and components +### Comparte tus propias utilidades comunes a través de ambientes y componentes ![alt text](https://github.com/i0natan/nodebestpractices/blob/master/assets/images/Privatenpm.png "Structuring solution by components") From 7b2520fdd619df59a2675481cdc7c81be7cd8757 Mon Sep 17 00:00:00 2001 From: Eduardo Montalvo Date: Mon, 8 Jan 2018 19:16:48 -0600 Subject: [PATCH 4/7] Add new title for promises --- translations/spanish/README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/translations/spanish/README.md b/translations/spanish/README.md index a0b6db89b..eb7a0d83d 100644 --- a/translations/spanish/README.md +++ b/translations/spanish/README.md @@ -93,11 +93,11 @@ De lo contrario: no cumplir con ninguno de los requisitos de configuración simp


-

⬆ Return to top

+

⬆ Regresar arriba

-# `2. Error Handling Practices` +# `2. Prácticas en manejo de errores` -## ![✔] 2.1 Use Async-Await or promises for async error handling +## ![✔] 2.1 Usa Async-Await o promesas para manejo de errores asíncronos **TL;DR:** Handling async errors in callback style is probably the fastest way to hell (a.k.a the pyramid of doom). The best gift you can give to your code is using instead a reputable promise library or async-await which provides much compact and familiar code syntax like try-catch From ad5b032849de32e18f0f214745144f02e3ae68e5 Mon Sep 17 00:00:00 2001 From: Eduardo Montalvo Date: Mon, 8 Jan 2018 21:47:40 -0600 Subject: [PATCH 5/7] Removed translations folder and modify readme file to translate up to point two --- README.md | 175 +++-- translations/spanish/README.md | 674 ------------------ .../sections/errorhandling/apmproducts.md | 29 - .../errorhandling/asyncerrorhandling.md | 56 -- .../catchunhandledpromiserejection.md | 58 -- .../errorhandling/centralizedhandling.md | 83 --- .../errorhandling/documentingusingswagger.md | 15 - .../sections/errorhandling/failfast.md | 50 -- .../sections/errorhandling/monitoring.md | 18 - .../operationalvsprogrammererror.md | 51 -- .../errorhandling/shuttingtheprocess.md | 54 -- .../errorhandling/testingerrorflows.md | 37 - .../sections/errorhandling/usematurelogger.md | 51 -- .../errorhandling/useonlythebuiltinerror.md | 78 -- .../sections/production/apmproducts.md | 27 - .../production/assigntransactionid.md | 41 -- .../sections/production/bestateless.md | 39 - .../production/createmaintenanceendpoint.md | 35 - .../sections/production/delegatetoproxy.md | 50 -- .../production/detectvulnerabilities.md | 26 - .../sections/production/frontendout.md | 41 -- .../sections/production/guardprocess.md | 19 - .../sections/production/lockdependencies.md | 74 -- .../sections/production/measurememory.md | 26 - .../spanish/sections/production/monitoring.md | 39 - .../sections/production/productoncode.md | 17 - .../spanish/sections/production/setnodeenv.md | 35 - .../sections/production/smartlogging.md | 43 -- .../spanish/sections/production/utilizecpu.md | 27 - .../projectstructre/breakintcomponents.md | 26 - .../sections/projectstructre/configguide.md | 34 - .../sections/projectstructre/createlayers.md | 11 - .../projectstructre/separateexpress.md | 47 -- .../projectstructre/thincomponents.md | 26 - .../sections/projectstructre/wraputilities.md | 14 - translations/spanish/sections/template.md | 40 -- .../sections/testingandquality/bumpversion.md | 30 - 37 files changed, 83 insertions(+), 2113 deletions(-) delete mode 100644 translations/spanish/README.md delete mode 100644 translations/spanish/sections/errorhandling/apmproducts.md delete mode 100644 translations/spanish/sections/errorhandling/asyncerrorhandling.md delete mode 100644 translations/spanish/sections/errorhandling/catchunhandledpromiserejection.md delete mode 100644 translations/spanish/sections/errorhandling/centralizedhandling.md delete mode 100644 translations/spanish/sections/errorhandling/documentingusingswagger.md delete mode 100644 translations/spanish/sections/errorhandling/failfast.md delete mode 100644 translations/spanish/sections/errorhandling/monitoring.md delete mode 100644 translations/spanish/sections/errorhandling/operationalvsprogrammererror.md delete mode 100644 translations/spanish/sections/errorhandling/shuttingtheprocess.md delete mode 100644 translations/spanish/sections/errorhandling/testingerrorflows.md delete mode 100644 
translations/spanish/sections/errorhandling/usematurelogger.md delete mode 100644 translations/spanish/sections/errorhandling/useonlythebuiltinerror.md delete mode 100644 translations/spanish/sections/production/apmproducts.md delete mode 100644 translations/spanish/sections/production/assigntransactionid.md delete mode 100644 translations/spanish/sections/production/bestateless.md delete mode 100644 translations/spanish/sections/production/createmaintenanceendpoint.md delete mode 100644 translations/spanish/sections/production/delegatetoproxy.md delete mode 100644 translations/spanish/sections/production/detectvulnerabilities.md delete mode 100644 translations/spanish/sections/production/frontendout.md delete mode 100644 translations/spanish/sections/production/guardprocess.md delete mode 100644 translations/spanish/sections/production/lockdependencies.md delete mode 100644 translations/spanish/sections/production/measurememory.md delete mode 100644 translations/spanish/sections/production/monitoring.md delete mode 100644 translations/spanish/sections/production/productoncode.md delete mode 100644 translations/spanish/sections/production/setnodeenv.md delete mode 100644 translations/spanish/sections/production/smartlogging.md delete mode 100644 translations/spanish/sections/production/utilizecpu.md delete mode 100644 translations/spanish/sections/projectstructre/breakintcomponents.md delete mode 100644 translations/spanish/sections/projectstructre/configguide.md delete mode 100644 translations/spanish/sections/projectstructre/createlayers.md delete mode 100644 translations/spanish/sections/projectstructre/separateexpress.md delete mode 100644 translations/spanish/sections/projectstructre/thincomponents.md delete mode 100644 translations/spanish/sections/projectstructre/wraputilities.md delete mode 100644 translations/spanish/sections/template.md delete mode 100644 translations/spanish/sections/testingandquality/bumpversion.md diff --git a/README.md b/README.md index f6a7f9d85..35e166510 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ [✔]: assets/images/checkbox-small-blue.png -# Node.js Best Practices +# Mejores prácticas de NodeJS

Node.js Best Practices @@ -14,207 +14,198 @@
- [![nodepractices](/assets/images/twitter-s.png)](https://twitter.com/nodepractices/) **Follow us on Twitter!** [**@nodepractices**](https://twitter.com/nodepractices/) + [![nodepractices](/assets/images/twitter-s.png)](https://twitter.com/nodepractices/) **¡Síguenos en Twitter!** [**@nodepractices**](https://twitter.com/nodepractices/)
-# Welcome! 3 Things You Ought To Know First: -**1. When you read here, you in fact read dozens of the best Node.JS articles -** this is a summary and curation of the top-ranked content on Node JS best practices +# ¡Bienvenido! 3 cosas que necesitas saber primero: +**1. Cuando lees aquí, lees docenas de los mejores artículos de Node.JS -** este es un resumen y selección del contenido mejor clasificado de las mejores prácticas de NodeJS -**2. It's the largest compilation, and it growing every week -** currently, more than 50 practices, style guide, and architectural tips are presented. We welcome issues and PR to ever keep this live book updated. We'd love to see you contributing here, whether fixing some code mistake or suggesting brilliant new ideas - be part of the Node.JS best practices book +**2. Es la compilación más grande y crece cada semana -** actualmente, se presentan más de 50 prácticas, guías de estilo y consejos arquitectónicos. Damos la bienvenida a issues y pull requests para mantener este libro actualizado. Nos encantaría verte contribuir aquí, ya sea corrigiendo algunos errores de código o sugiriendo nuevas ideas brillantes: sé parte del libro de mejores prácticas de Node.JS -**3. Most bullets have additional info -** nearby most best practice bullets you'll find **🔗Read More** link that will present you with code examples, quotes from selected blogs and more info +**3. La mayoría de los puntos tienen información adicional -** Encontrarás cerca de los puntos de mejores prácticas el enlace **🔗Leer más** que te dará algunos ejemplos de código, citas de blogs seleccionados y más información


-## Table of Contents -1. [Project structure Practices (5)](#1-project-structure-practices) -2. [Error Handling Practices (11) ](#2-error-handling-practices) -3. [Code Style Practices (12) ](#3-code-style-practices) -4. [Testing And Overall Quality Practices (8) ](#4-testing-and-overall-quality-practices) -5. [Going To Production Practices (16) ](#5-going-to-production-practices) -6. Security Practices (coming soon) -7. Performance Practices (coming soon) +## Tabla de contenidos +1. [Prácticas para estructura del proyecto (5)](#1-project-structure-practices) +2. [Prácticas en manejo de errores (11) ](#2-error-handling-practices) +3. [Prácticas de estilo de código (12) ](#3-code-style-practices) +4. [Prácticas de prueba y calidad en general (8) ](#4-testing-and-overall-quality-practices) +5. [Prácticas de puesta en producción (16) ](#5-going-to-production-practices) +6. Prácticas de Seguridad (próximamente) +7. Prácticas de Rendimiento (próximamente)


-# `1. Project Structure Practices` +# `1. Prácticas de estructura del proyecto` -## ![✔] 1.1 Structure your solution by components +## ![✔] 1.1 Estructura tu solución en componentes - **TL;DR:** The worst large applications pitfall is maintaining a huge code base with hundreds of dependencies - such a monolith slows down developers as they try to incorporate new features. Instead, partition your code into components, each gets its own folder or a dedicated codebase, and ensure that each unit is kept small and simple. Visit 'Read More' below to see examples of correct project structure +**TL;DR:** El peor inconveniente de las grandes aplicaciones es mantener una gran base de código con cientos de dependencias, un monolito que ralentiza a los desarrolladores que intentan incorporar nuevas características. En cambio, particiona tu código en componentes, cada uno obtiene su propia carpeta o una base de código dedicada, y asegúrate de que cada unidad se mantenga pequeña y simple. Visita 'Leer más' a continuación para ver ejemplos de la estructura correcta del proyecto -**Otherwise:** When developers who code new features struggle to realize the impact of their change and fear to break other dependant components - deployments become slower and more risky. It's also considered harder to scale-out when all the business units are not separated +**De lo contrario:** Cuando los desarrolladores que codifican nuevas características luchan por entender el impacto de sus cambios y temen romper otros componentes dependientes, las implementaciones se vuelven más lentas y más riesgosas. También se considera más difícil escalar cuando las unidades de negocio no están separadas -🔗 [**Read More: structure by components**](/sections/projectstructre/breakintcomponents.md) +🔗 [**Leer más: estructura en componentes**](/sections/projectstructre/breakintcomponents.md)

-## ![✔] 1.2 Layer your components, keep Express within its boundaries +## ![✔] 1.2 Aplica capas a tus componentes, mantén Express dentro de sus límites -**TL;DR:** Each component should contain 'layers' - a dedicated object for the web, logic and data access code. This not only draws a clean separation of concerns but also significantly ease mocking and testing the system. Though this is a very common pattern, API developers tend to mix layers by passing the web layer objects (Express req, res) to business logic and data layers - this makes your application dependant on and accessible by Express only +**TL;DR:** Cada componente debería contener 'capas': un objeto dedicado para la web, la lógica y el código de acceso a datos. Esto no solo genera una clara separación de responsabilidades sino que también facilita significativamente los mocks y las pruebas del sistema. Aunque este es un patrón muy común, los desarrolladores de APIs tienden a mezclar capas pasando los objetos de la capa web (Express req, res) a la lógica de negocios y a las capas de datos; esto hace que tu aplicación dependa de Express y solo sea accesible a través de él. -**Otherwise:** App that mixes web objects with other layers can not be accessed by testing code, CRON jobs and other non-Express callers +**De lo contrario:** Una aplicación que mezcla objetos web con otras capas no puede ser accedida por código de pruebas, CRON jobs y otras llamadas que no son de Express. -🔗 [**Read More: layer your app**](/sections/projectstructre/createlayers.md) +🔗 [**Leer más: Aplicar capas a tu aplicación**](/sections/projectstructre/createlayers.md)

-## ![✔] 1.3 Wrap common utilities as NPM packages +## ![✔] 1.3 Envuelve las utilidades comunes como paquetes de NPM -**TL;DR:** In a large app that constitues multiple code base, cross-cutting-conern utilities like logger, encryption and a like, should be wrapped by your own code and exposed as private NPM packages. This allows sharing them among multiple code bases and projects +**TL;DR:** En una aplicación grande que se constituye de múltiples bases de código, las utilidades transversales como los loggers, el cifrado y similares deben estar envueltas por tu propio código y expuestas como paquetes privados de NPM. Esto permite compartirlas entre múltiples bases de código y proyectos. -**Otherwise:** You'll have to invent your own deployment and dependency wheel +**De lo contrario:** Tendrás que reinventar la rueda del despliegue y de las dependencias -🔗 [**Read More: Structure by feature**](/sections/projectstructre/wraputilities.md) +🔗 [**Leer más: Estructura por característica**](/sections/projectstructre/wraputilities.md)

-## ![✔] 1.4 Separate Express 'app' and 'server' +## ![✔] 1.4 Separar 'servidor' y 'aplicación' de Express -**TL;DR:** Avoid the nasty habit of defining the entire [Express](https://expressjs.com/) app in a single huge file - separate your 'Express' definition to at least two files: the API declaration (app.js) and the networking concerns (WWW).For even better structure, locate your API declaration within components +**TL;DR:** Evita el desagradable hábito de definir toda la aplicación [Express](https://expressjs.com/) en un único archivo enorme; separa tu definición de 'Express' en al menos dos archivos: la declaración del API (app.js) y las características de red (WWW). Para una estructura aún mejor, ubica tu declaración del API dentro de los componentes. -**Otherwise:** Your API will be accessible for testing via HTTP calls only (slower and much harder to generate coverage reports). It will also probably won't be a big pleasure to maintain hundreds of lines of code in a single file +**De lo contrario:** Se podrá acceder a tu API para realizar pruebas solo a través de llamadas HTTP (más lento y mucho más difícil para generar informes de cobertura). Probablemente tampoco sea un placer enorme mantener cientos de líneas de código en un solo archivo -🔗 [**Read More: separate Express 'app' and 'server'**](/sections/projectstructre/separateexpress.md) +🔗 [**Leer más: separar 'servidor' y 'aplicación' de Express**](/sections/projectstructre/separateexpress.md)

-## ![✔] 1.5 Use environment aware, secure and hirearchical config +## ![✔] 1.5 Usar una configuración segura, jerárquica y consciente del entorno -**TL;DR:** The perfect and flawless configuration setup must include (a) keys that can be read from file AND from environment variable (b) secrets are kept outside committed code (c) config is hierarchical for easier findability. There are only a few packages that can help tick most of those boxes like [nconf](https://www.npmjs.com/package/nconf) and [config](https://www.npmjs.com/package/config) +**TL;DR:** La configuración perfecta e impecable debe incluir (a) claves que se pueden leer desde el archivo Y desde la variable de entorno, (b) los secretos se guardan fuera del código al que se ha hecho commit, (c) la configuración es jerárquica para facilitar la localización. Solo hay unos pocos paquetes que pueden ayudar a cumplir la mayoría de estos casos como [nconf](https://www.npmjs.com/package/nconf) y [config](https://www.npmjs.com/package/config) -**Otherwise:** Failing to satisfy any of the config requirements will simply bog down the development team or devpos team. Probably both +**De lo contrario:** No cumplir con ninguno de los requisitos de configuración simplemente frena al equipo de desarrollo o al equipo de devops. Probablemente ambos -🔗 [**Read More: configuration best practices**](/sections/projectstructre/configguide.md) +🔗 [**Leer más: buenas prácticas de configuración**](/sections/projectstructre/configguide.md)


-

⬆ Return to top

+

⬆ Regresar arriba

-# `2. Error Handling Practices` +# `2. Prácticas en manejo de errores` -## ![✔] 2.1 Use Async-Await or promises for async error handling +## ![✔] 2.1 Usa Async-Await o promesas para manejo de errores asíncronos -**TL;DR:** Handling async errors in callback style is probably the fastest way to hell (a.k.a the pyramid of doom). The best gift you can give to your code is using instead a reputable promise library or async-await which provides much compact and familiar code syntax like try-catch +**TL;DR:** El manejo de errores asincrónicos en el estilo de callback es probablemente la manera más rápida de ir al infierno (a.k.a, pyramid of doom o pirámide de la perdición). El mejor regalo que le puedes dar a tu código es utilizar una biblioteca de promesas seria o async-await que proporcione una sintaxis de código muy compacta y familiar como try-catch -**Otherwise:** Node.JS callback style, function(err, response), is a promising way to un-maintainable code due to the mix of error handling with casual code, excessive nesting and awkward coding patterns +**De lo contrario:** El estilo de callback de Node.JS, function (err, response), es una forma prometedora de código no mantenible debido a la combinación de manejo de errores con código accidentado, anidación excesiva y patrones de codificación incómodos -🔗 [**Read More: avoiding callbacks**](/sections/errorhandling/asyncerrorhandling.md) +🔗 [** Leer más: evitar callbacks **] (/sections/errorhandling/asyncerrorhandling.md)

- -## ![✔] 2.2 Use only the built-in Error object -**TL;DR:** Many throws errors as a string or as some custom type – this complicates the error handling logic and the interoperability between modules. Whether you reject a promise, throw exception or emit error – using only the built-in Error object will increases uniformity and prevents loss of information +## ![✔] 2.2 Usa solo el objeto Error incorporado +**TL;DR:** Muchos arrojan errores como una cadena o como un tipo personalizado; esto complica la lógica de manejo de errores y la interoperabilidad entre módulos. Ya sea que rechaces una promesa, lances una excepción o emitas un error, usar solo el objeto Error incorporado aumentará la uniformidad y evitará la pérdida de información. -**Otherwise:** When invoking some component, being uncertain which type of errors come in return – makes it much harder to handle errors properly. Even worse, using custom types to describe errors might lead to loss of critical error information like the stack trace! +**De lo contrario:** Al invocar algún componente, no estar seguro de qué tipo de errores se retornan hace que sea mucho más difícil manejar los errores de forma adecuada. Peor aún, el uso de tipos personalizados para describir los errores puede conducir a la pérdida de información crítica del error, ¡como el stack trace! -🔗 [**Read More: using the built-in error object**](/sections/errorhandling/useonlythebuiltinerror.md) +🔗 [**Leer más: utilizando el objeto Error incorporado**](/sections/errorhandling/useonlythebuiltinerror.md)
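Como ilustración, un esbozo mínimo (la clase AppError y la propiedad httpCode son supuestos) de por qué conviene lanzar siempre instancias de Error:

```javascript
// Mal: lanzar una cadena pierde el stack trace y rompe instanceof
// throw 'no se encontró el producto';

// Bien: usar el objeto Error incorporado o una clase hipotética que lo extienda
class AppError extends Error {
  constructor(message, httpCode) {
    super(message);
    this.name = 'AppError';
    this.httpCode = httpCode; // propiedad adicional hipotética
    Error.captureStackTrace(this, this.constructor);
  }
}

try {
  throw new AppError('no se encontró el producto', 404);
} catch (error) {
  console.error(error instanceof Error, error.httpCode, error.stack);
}
```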

-## ![✔] 2.3 Distinguish operational vs programmer errors +## ![✔] 2.3 Distinguir errores operacionales contra errores del programador -**TL;DR:** Operational errors (e.g. API received an invalid input) refer to known cases where the error impact is fully understood and can be handled thoughtfully. On the other hand, programmer error (e.g. trying to read undefined variable) refers to unknown code failures that dictate to gracefully restart the application +**TL;DR:** Los errores operacionales (por ejemplo, el API recibió una entrada no válida) se refieren a casos conocidos en los que el impacto del error se entiende completamente y se puede manejar con cuidado. Por otro lado, el error del programador (por ejemplo, tratar de leer una variable no definida) se refiere a fallas desconocidas del código que obligan a reiniciar la aplicación con elegancia -**Otherwise:** You may always restart the application when an error appear, but why letting ~5000 online users down because of a minor, predicted, operational error? the opposite is also not ideal – keeping the application up when unknown issue (programmer error) occurred might lead to an unpredicted behavior. Differentiating the two allows acting tactfully and applying a balanced approach based on the given context +**De lo contrario:** Siempre puedes reiniciar la aplicación cuando aparece un error, pero ¿por qué dejar caídos a ~5000 usuarios en línea debido a un error operacional menor y previsto? Lo contrario tampoco es ideal: mantener la aplicación activa cuando se produce un problema desconocido (error del programador) puede provocar un comportamiento imprevisto. La diferenciación de los dos permite actuar con tacto y aplicar un enfoque equilibrado basado en el contexto dado - 🔗 [**Read More: operational vs programmer error**](/sections/errorhandling/operationalvsprogrammererror.md) +🔗 [**Leer más: error operacional vs error del programador**](/sections/errorhandling/operationalvsprogrammererror.md)
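Un esbozo mínimo de la idea, marcando cada error como operacional o no (la clase AppError y la propiedad isOperational son supuestos):

```javascript
// Clase hipotética: los errores operacionales conocidos se marcan para no reiniciar el proceso por ellos
class AppError extends Error {
  constructor(message, isOperational) {
    super(message);
    this.isOperational = isOperational;
  }
}

// Error operacional: entrada inválida del API; se maneja y la aplicación sigue viva
const invalidInput = new AppError('falta el parámetro "email"', true);

// Error de programador: estado desconocido; lo más seguro es reiniciar con elegancia
const unknownFailure = new AppError('se leyó una variable indefinida', false);

console.log(invalidInput.isOperational, unknownFailure.isOperational);
```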

-## ![✔] 2.4 Handle errors centrally, not within an Express middleware +## ![✔] 2.4 Maneja los errores centralmente, no dentro de un middleware de Express -**TL;DR:** Error handling logic such as mail to admin and logging should be encapsulated in a dedicated and centralized object that all end-points (e.g. Express middleware, cron jobs, unit-testing) call when an error comes in. +**TL;DR:** La lógica de manejo de errores, como un correo al administrador y el registro de logs, debe encapsularse en un objeto dedicado y centralizado al que todos los end-points (por ejemplo, Express middleware, cron jobs, unit-testing) llaman cuando se produce un error. -**Otherwise:** Not handling errors within a single place will lead to code duplication and probably to errors that are handled improperly +**De lo contrario:** No manejar los errores dentro de un solo lugar dará lugar a la duplicación del código y, probablemente, a errores manejados de forma incorrecta -🔗 [**Read More: handling errors in a centralized place**](/sections/errorhandling/centralizedhandling.md) +🔗 [**Leer más: manejo de errores en un lugar centralizado**](/sections/errorhandling/centralizedhandling.md)
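A modo de ilustración, un esbozo mínimo (el objeto errorHandler es un supuesto) en el que el middleware de Express solo delega en un manejador centralizado:

```javascript
const express = require('express');
const app = express();

// Objeto centralizado hipotético que encapsula toda la lógica de manejo de errores
const errorHandler = {
  handleError: async (error) => {
    // aquí irían el logger maduro, el correo al administrador, las métricas, etc.
    console.error(error);
  }
};

// El middleware de Express (igual que un cron job o un test) solo delega, no contiene la lógica
app.use(async (err, req, res, next) => {
  await errorHandler.handleError(err);
  res.status(err.httpCode || 500).send('Ocurrió un error');
});
```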

-## ![✔] 2.5 Document API errors using Swagger - -**TL;DR:** Let your API callers know which errors might come in return so they can handle these thoughtfully without crashing. This is usually done with REST API documentation frameworks like Swagger +## ![✔] 2.5 Documenta los errores del API usando Swagger -**Otherwise:** An API client might decide to crash and restart only because he received back an error he couldn’t understand. Note: the caller of your API might be you (very typical in a microservices environment) +**TL;DR:** Deja que los clientes de tu API sepan qué errores podrían presentarse como respuesta para que puedan manejarlos cuidadosamente sin fallar. Esto se hace generalmente con frameworks de documentación de REST API como Swagger +**De lo contrario:** Un cliente del API podría decidir bloquearse y reiniciarse solo porque recibió un error que no pudo entender. Nota: quien llama a tu API puedes ser tú mismo (muy típico en un entorno de microservicios) -🔗 [**Read More: documenting errors in Swagger**](/sections/errorhandling/documentingusingswagger.md) +🔗 [**Leer más: documentación de errores en Swagger**](/sections/errorhandling/documentingusingswagger.md)

- -## ![✔] 2.6 Shut the process gracefully when a stranger comes to town +## ![✔] 2.6 Cierra el proceso elegantemente cuando un extraño llega a la ciudad -**TL;DR:** When an unknown error occurs (a developer error, see best practice number #3)- there is uncertainty about the application healthiness. A common practice suggests restarting the process carefully using a ‘restarter’ tool like Forever and PM2 +**TL;DR:** Cuando se produce un error desconocido (un error del desarrollador, consulta la práctica recomendada número 3), existe incertidumbre acerca del estado de la aplicación. Una práctica común sugiere reiniciar el proceso cuidadosamente usando una herramienta 'reiniciadora' como Forever y PM2. -**Otherwise:** When an unfamiliar exception is caught, some object might be in a faulty state (e.g an event emitter which is used globally and not firing events anymore due to some internal failure) and all future requests might fail or behave crazily +**De lo contrario:** Cuando se detecta una excepción desconocida, algunos objetos pueden estar en un estado defectuoso (por ejemplo, un emisor de eventos que se usa globalmente y que ya no emite eventos debido a alguna falla interna) y todas las solicitudes futuras pueden fallar o comportarse de forma errática -🔗 [**Read More: shutting the process**](/sections/errorhandling/shuttingtheprocess.md) +🔗 [**Leer más: cerrar el proceso**](/sections/errorhandling/shuttingtheprocess.md)
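Un esbozo mínimo de la idea (la decisión exacta de cuándo salir depende de cada aplicación): ante un error desconocido se registra el problema y se cierra el proceso para que el 'reiniciador' levante uno limpio:

```javascript
process.on('uncaughtException', (error) => {
  // se registra el error antes de salir; un error de programador deja el estado del proceso en duda
  console.error('Excepción no capturada, cerrando el proceso:', error);
  // el reiniciador (PM2, Forever, el orquestador de contenedores) levantará un proceso nuevo y limpio
  process.exit(1);
});
```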

+## ![✔] 2.7 Usa un logger maduro para aumentar la visibilidad de los errores +**TL;DR:** Un conjunto de herramientas de registro maduras como Winston, Bunyan o Log4J acelerará el descubrimiento y la comprensión de errores. Así que olvídate de console.log. -## ![✔] 2.7 Use a mature logger to increase errors visibility - -**TL;DR:** A set of mature logging tools like Winston, Bunyan or Log4J, will speed-up error discovery and understanding. So forget about console.log. - -**Otherwise:** Skimming through console.logs or manually through messy text file without querying tools or a decent log viewer might keep you busy at work until late - -🔗 [**Read More: using a mature logger**](/sections/errorhandling/usematurelogger.md) +**De lo contrario:** Revisar console.logs o buscar manualmente en un archivo de texto desordenado, sin herramientas de consulta ni un visor de logs decente, puede mantenerte ocupado en el trabajo hasta tarde +🔗 [**Leer más: utilizando un logger maduro**](/sections/errorhandling/usematurelogger.md)
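Como ilustración, un esbozo mínimo con Winston (la configuración y el nombre de archivo app.log son supuestos):

```javascript
const winston = require('winston');

// Logger configurado una sola vez y reutilizado en toda la aplicación
const logger = winston.createLogger({
  level: 'info',
  transports: [
    new winston.transports.Console(),
    new winston.transports.File({ filename: 'app.log' }) // nombre de archivo hipotético
  ]
});

logger.info('El servidor arrancó correctamente');
logger.error('Falló la conexión a la base de datos', { intentos: 3 });
```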

+## ![✔] 2.8 Prueba los flujos de error usando tu framework de pruebas favorito -## ![✔] 2.8 Test error flows using your favorite test framework - -**TL;DR:** Whether professional automated QA or plain manual developer testing – Ensure that your code not only satisfies positive scenario but also handle and return the right errors. Testing framework like Mocha & Chai can handle this easily (see code examples within the "Gist popup") +**TL;DR:** Ya sea con QA automatizado profesional o con simples pruebas manuales del desarrollador, asegúrate de que tu código no solo satisfaga el escenario positivo sino que también maneje y devuelva los errores correctos. Frameworks de prueba como Mocha & Chai pueden manejar esto fácilmente (ve los ejemplos de código dentro del "Gist emergente") -**Otherwise:** Without testing, whether automatically or manually, you can’t rely on our code to return the right errors. Without meaningful errors – there’s no error handling +**De lo contrario:** Sin pruebas, ya sea automáticas o manuales, no puedes confiar en que tu código devuelva los errores correctos. Sin errores significativos, no hay manejo de errores +🔗 [**Leer más: probar los flujos de error**](/sections/errorhandling/testingerrorflows.md) -🔗 [**Read More: testing error flows**](/sections/errorhandling/testingerrorflows.md) +<br/><br/>
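Un esbozo mínimo con Mocha y Chai (la función addNewProduct y el código de error son supuestos) que verifica que se lance el error correcto:

```javascript
const { expect } = require('chai');

// Función hipotética bajo prueba
function addNewProduct(name) {
  if (!name) {
    const error = new Error('falta el nombre del producto');
    error.code = 'invalidInput';
    throw error;
  }
  return { name };
}

describe('addNewProduct', () => {
  it('lanza un error con el código correcto cuando falta el nombre', () => {
    // Chai permite encadenar aserciones sobre el error lanzado
    expect(() => addNewProduct()).to.throw().with.property('code', 'invalidInput');
  });
});
```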

-

- -## ![✔] 2.9 Discover errors and downtime using APM products - -**TL;DR:** Monitoring and performance products (a.k.a APM) proactively gauge your codebase or API so they can auto-magically highlight errors, crashes and slow parts that you were missing +## ![✔] 2.9 Descubre errores y tiempo de inactividad usando productos APM +**TL;DR:** Los productos de monitoreo y rendimiento (a.k.a APM) miden de forma proactiva tu base de código o API para resaltar de forma auto-mágica errores, caídas y partes lentas que estabas pasando por alto. -**Otherwise:** You might spend great effort on measuring API performance and downtimes, probably you’ll never be aware which are your slowest code parts under real world scenario and how these affects the UX +**De lo contrario:** Es posible que dediques un gran esfuerzo a medir el rendimiento y los tiempos de inactividad de la API; probablemente nunca sabrás cuáles son las partes de código más lentas en un escenario del mundo real y cómo afectan a la experiencia del usuario. +🔗 [**Leer más: utilizando productos APM**](/sections/errorhandling/apmproducts.md) -🔗 [**Read More: using APM products**](/sections/errorhandling/apmproducts.md)

+## ![✔] 2.10 Captura rechazos de promesas no controlados -## ![✔] 2.10 Catch unhandled promise rejections -**TL;DR:** Any exception thrown within a promise will get swallowed and discarded unless a developer didn’t forget to explictly handle. Even if you’re code is subscribed to process.uncaughtException! Overcome this by registering to the event process.unhandledRejection +**TL;DR:** Cualquier excepción lanzada dentro de una promesa será tragada y descartada a menos que el desarrollador la maneje de manera explícita. ¡Incluso si tu código está suscrito a process.uncaughtException! Supera esto suscribiéndote al evento process.unhandledRejection. -**Otherwise:** Your errors will get swallowed and leave no trace. Nothing to worry about - - +**De lo contrario:** Tus errores serán tragados y no dejarán rastro. Nada de qué preocuparse -🔗 [**Read More: catching unhandled promise rejection **](/sections/errorhandling/catchunhandledpromiserejection.md) +🔗 [**Leer más: captura rechazos de promesas no controlados**](/sections/errorhandling/catchunhandledpromiserejection.md)
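A modo de ilustración, un esbozo mínimo de cómo suscribirse al evento para que ningún rechazo quede sin rastro (la decisión de relanzar o solo registrar es un supuesto de diseño):

```javascript
process.on('unhandledRejection', (reason) => {
  // aquí se puede delegar en el manejador de errores centralizado
  console.error('Rechazo de promesa no manejado:', reason);
  // relanzar hace que el flujo de uncaughtException decida si cerrar el proceso
  throw reason;
});

// Sin el manejador anterior, esta promesa rechazada sería "tragada" sin dejar rastro
Promise.reject(new Error('algo salió mal'));
```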

- -## ![✔] 2.11 Fail fast, validate arguments using a dedicated library +## ![✔] 2.11 Falla rápidamente, valida argumentos usando una biblioteca dedicada -**TL;DR:** This should be part of your Express best practices – Assert API input to avoid nasty bugs that are much harder to track later. Validation code is usually tedious unless using a very cool helper libraries like Joi +**TL;DR:** Esto debería ser parte de tus mejores prácticas de Express: valida la entrada del API para evitar errores desagradables que son mucho más difíciles de rastrear después. El código de validación suele ser tedioso a menos que se utilicen bibliotecas de ayuda muy buenas como Joi -**Otherwise:** Consider this – your function expects a numeric argument “Discount” which the caller forgets to pass, later on your code checks if Discount!=0 (amount of allowed discount is greater than zero), then it will allow the user to enjoy a discount. OMG, what a nasty bug. Can you see it? +**De lo contrario:** Considera esto: tu función espera un argumento numérico "Descuento" que la persona que llama olvida pasar, más adelante tu código comprueba si Descuento != 0 (la cantidad de descuento permitida es mayor que cero), entonces permitirá que el usuario disfrute de un descuento. Dios mío, qué error tan desagradable. ¿Puedes verlo? -🔗 [**Read More: failing fast**](/sections/errorhandling/failfast.md) +🔗 [**Leer más: falla rápidamente**](/sections/errorhandling/failfast.md)
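Un esbozo mínimo con Joi (el esquema memberSchema y sus campos son supuestos) que falla rápido ante una entrada inválida:

```javascript
const Joi = require('joi');

// Esquema hipotético: el descuento es obligatorio y numérico
const memberSchema = Joi.object({
  email: Joi.string().email().required(),
  discount: Joi.number().min(0).required()
});

function addNewMember(member) {
  const { error } = memberSchema.validate(member);
  if (error) {
    // se falla rápido, antes de que el descuento ausente cause un bug silencioso más adelante
    throw new Error(`entrada inválida: ${error.message}`);
  }
  // ... lógica de negocio con datos ya validados
  return member;
}

addNewMember({ email: 'ada@example.com' }); // lanza: falta "discount"
```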


-

⬆ Return to top

+

⬆ Regresar arriba

# `3. Code Style Practices` diff --git a/translations/spanish/README.md b/translations/spanish/README.md deleted file mode 100644 index eb7a0d83d..000000000 --- a/translations/spanish/README.md +++ /dev/null @@ -1,674 +0,0 @@ -[✔]: ../../assets/images/checkbox-small-blue.png - -# Mejores prácticas de NodeJS - -

- Node.js Best Practices -

- -
- -
-50 items Last update: Oct 20, 2017 Updated for Node v.8.4 -
- -
- - [![nodepractices](/assets/images/twitter-s.png)](https://twitter.com/nodepractices/) **¡Síguenos en Twitter!** [**@nodepractices**](https://twitter.com/nodepractices/) -
- -# ¡Bienvenido! 3 cosas que necesitas saber primero: -**1. Cuando lees aquí, lees docenas de los mejores artículos de Node.JS -** este es un resumen y conservación del contenido mejor clasificado de las mejores prácticas de NodeJS - -**2. Es la compilación más grande y crece cada semana -** actualmente, se presentan más de 50 prácticas, guías de estilo y consejos arquitectónicos. Damos la bienvenida a issues y pull requests para mantener este libro actualizado. Nos encantaría verte contribuir aquí, ya sea corrigiendo algunos errores de código o sugiriendo nuevas ideas brillantes: se parte del libro de mejores prácticas de Node.JS - -**3. La mayoría de los puntos tiene información adicional -** Encontrás cerca de los puntos de mejores prácticas el enlace **🔗Leer más** que te dará algunos ejemplos de código, citas de blogs seleccionados y mas información - -


- -## Tabla de contenidos -1. [Prácticas para estructura del proyecto (5)](#1-project-structure-practices) -2. [Prácticas en manejo de errores (11) ](#2-error-handling-practices) -3. [Prácticas de estilo de código (12) ](#3-code-style-practices) -4. [Practicas de prueba y calidad en general (8) ](#4-testing-and-overall-quality-practices) -5. [Prácticas de puesta en producción (16) ](#5-going-to-production-practices) -6. Prácticas de Seguridad (próximamente) -7. Prácticas de Rendimiento (próximamente) - -


-# `1. Prácticas de estructura del proyecto` - -## ![✔] 1.1 Estructura tu solución en componentes - -**TL;DR:** El peor inconveniente de las grandes aplicaciones es mantener una gran base de código con cientos de dependencias, un monolito que ralentiza a los desarrolladores que intentan incorporar nuevas características. En cambio, particiona tu código en componentes, cada uno obtiene su propia carpeta o una base de código dedicada, y asegúrate de que cada unidad se mantenga pequeña y simple. Visita 'Leer más' a continuación para ver ejemplos de la estructura correcta del proyecto - -**De lo contrario:** Cuando desarrolladores codifican nuevas características luchan por darse cuenta del impacto de su cambio y temen romper otros componentes dependientes - las implementaciones se vuelven más lentas y más riesgosas. También se considera más difícil escalar cuando todas las unidades de negocios no están separadas - -🔗 [**Leer más: estructura en componentes**](/sections/projectstructre/breakintcomponents.md) - -

- -## ![✔] 1.2 Aplicar capas para componentes, mantén Express dentro de sus límites - -**TL;DR:** Cada componente debería contener 'capas'- un objeto dedicado para la web, la lógica y código para acceso a datos. Esto no solo genera una clara separación de conceptos sino que también facilita significativamente los mocks y la pruebas del sistema. Aunque este es un patrón muy común, los desarrolladores de APIs tienden a mezclar capas pasando los objetos de la capa web (Express req, res) a la lógica de negocios y capas de datos, esto hace que su aplicación dependa y solo sea accesible por Express. - -**De lo contrario:** Una aplicación que mezcla objectos de web con otras capas no puede ser accedida por código de pruebas, CRON jobs y otras llamadas que no son de Express. - -🔗 [**Leer más: Aplicar capas a tu aplicación**](/sections/projectstructre/createlayers.md) - -

- -## ![✔] 1.3 Envuelve las utilidades comunes como paquetes de NPM - -**TL;DR:** En una aplicación grande que se constituye de múltples bases de código, utilidades transversales como los loggers, cifrado y similares, deben de estar envueltos por su propio código y expuestos como paquetes privados de NPM. Esto permite compartirlos entre múltiples base de código y proyectos. - -**De lo contrario:** Tendrás que inventar tu propia implementación y rueda de dependencia - -🔗 [**Leer más: Estructura por característica**](/sections/projectstructre/wraputilities.md) - -

- -## ![✔] 1.4 Separar 'servidor' y 'aplicación' de express - -**TL;DR:** Evite el desagradable hábito de definir toda la aplicación [Express](https://expressjs.com/) en un único archivo enorme; separa tú definición de 'Express' en al menos dos archivos: la declaración del API (app.js) y los características de red (WWW). Incluso para una mejor estructura, ubica tu declaración del API dentro de los componentes. - -**De lo contrario:** Se podrá acceder a tu API para realizar pruebas solo a través de llamadas HTTP (más lento y mucho más difícil para generar informes de cobertura). Probablemente tampoco sea un placer enorme mantener cientos de líneas de código en un solo archivo - -🔗 [**Leer más: separar 'servidor' y 'aplicación' de express**](/sections/projectstructre/separateexpress.md) - -

- -## ![✔] 1.5 Usar una configuración segura, jerárquica y consciente del entorno - -De lo contrario: no cumplir con ninguno de los requisitos de configuración simplemente empantana el equipo de desarrollo o el equipo devpos. Probablemente ambos - -🔗 Leer más: mejores prácticas de configuración - -**TL;DR:** La configuración perfecta e impecable debe incluir (a) claves que se pueden leer desde el archivo Y desde la variable de entorno (b) los secretos se guardan fuera del código al que se ha hecho commit (c) config es jerárquica para facilitar la localización. Solo hay unos pocos paquetes que pueden ayudar a validar la mayoría de estos casos como [nconf](https://www.npmjs.com/package/nconf) y [config](https://www.npmjs.com/package/config) - -**De lo contrario:** No cumplir con ninguno de los requisitos de configuración simplemente frena al equipo de desarrollo o al equipo de devpos. Probablemente ambos - -🔗 [**Leer más: buenas prácticas de configuración **](/sections/projectstructre/configguide.md) - - -


- -

⬆ Regresar arriba

- -# `2. Prácticas en manejo de errores` - -## ![✔] 2.1 Usa Async-Await o promesas para manejo de errores asíncronos - -**TL;DR:** Handling async errors in callback style is probably the fastest way to hell (a.k.a the pyramid of doom). The best gift you can give to your code is using instead a reputable promise library or async-await which provides much compact and familiar code syntax like try-catch - -**Otherwise:** Node.JS callback style, function(err, response), is a promising way to un-maintainable code due to the mix of error handling with casual code, excessive nesting and awkward coding patterns - -🔗 [**Read More: avoiding callbacks**](/sections/errorhandling/asyncerrorhandling.md) - -

- -## ![✔] 2.2 Use only the built-in Error object - -**TL;DR:** Many throws errors as a string or as some custom type – this complicates the error handling logic and the interoperability between modules. Whether you reject a promise, throw exception or emit error – using only the built-in Error object will increases uniformity and prevents loss of information - - -**Otherwise:** When invoking some component, being uncertain which type of errors come in return – makes it much harder to handle errors properly. Even worse, using custom types to describe errors might lead to loss of critical error information like the stack trace! - -🔗 [**Read More: using the built-in error object**](/sections/errorhandling/useonlythebuiltinerror.md) - -

- -## ![✔] 2.3 Distinguish operational vs programmer errors - -**TL;DR:** Operational errors (e.g. API received an invalid input) refer to known cases where the error impact is fully understood and can be handled thoughtfully. On the other hand, programmer error (e.g. trying to read undefined variable) refers to unknown code failures that dictate to gracefully restart the application - -**Otherwise:** You may always restart the application when an error appear, but why letting ~5000 online users down because of a minor, predicted, operational error? the opposite is also not ideal – keeping the application up when unknown issue (programmer error) occurred might lead to an unpredicted behavior. Differentiating the two allows acting tactfully and applying a balanced approach based on the given context - - 🔗 [**Read More: operational vs programmer error**](/sections/errorhandling/operationalvsprogrammererror.md) - -

- -## ![✔] 2.4 Handle errors centrally, not within an Express middleware - -**TL;DR:** Error handling logic such as mail to admin and logging should be encapsulated in a dedicated and centralized object that all end-points (e.g. Express middleware, cron jobs, unit-testing) call when an error comes in. - -**Otherwise:** Not handling errors within a single place will lead to code duplication and probably to errors that are handled improperly - -🔗 [**Read More: handling errors in a centralized place**](/sections/errorhandling/centralizedhandling.md) - -

- -## ![✔] 2.5 Document API errors using Swagger - -**TL;DR:** Let your API callers know which errors might come in return so they can handle these thoughtfully without crashing. This is usually done with REST API documentation frameworks like Swagger - -**Otherwise:** An API client might decide to crash and restart only because he received back an error he couldn’t understand. Note: the caller of your API might be you (very typical in a microservices environment) - - -🔗 [**Read More: documenting errors in Swagger**](/sections/errorhandling/documentingusingswagger.md) - -

- -## ![✔] 2.6 Shut the process gracefully when a stranger comes to town - -**TL;DR:** When an unknown error occurs (a developer error, see best practice number #3)- there is uncertainty about the application healthiness. A common practice suggests restarting the process carefully using a ‘restarter’ tool like Forever and PM2 - -**Otherwise:** When an unfamiliar exception is caught, some object might be in a faulty state (e.g an event emitter which is used globally and not firing events anymore due to some internal failure) and all future requests might fail or behave crazily - -🔗 [**Read More: shutting the process**](/sections/errorhandling/shuttingtheprocess.md) - -

- - - -## ![✔] 2.7 Use a mature logger to increase errors visibility - -**TL;DR:** A set of mature logging tools like Winston, Bunyan or Log4J, will speed-up error discovery and understanding. So forget about console.log. - -**Otherwise:** Skimming through console.logs or manually through messy text file without querying tools or a decent log viewer might keep you busy at work until late - -🔗 [**Read More: using a mature logger**](/sections/errorhandling/usematurelogger.md) - - -

## ![✔] 2.8 Test error flows using your favorite test framework

**TL;DR:** Whether professional automated QA or plain manual developer testing – ensure that your code not only satisfies the positive scenarios but also handles and returns the right errors. Testing frameworks like Mocha & Chai can handle this easily (see code examples within the "Gist popup")

**Otherwise:** Without testing, whether automatically or manually, you can’t rely on your code to return the right errors. Without meaningful errors – there’s no error handling

🔗 [**Read More: testing error flows**](/sections/errorhandling/testingerrorflows.md)

## ![✔] 2.9 Discover errors and downtime using APM products

**TL;DR:** Monitoring and performance products (a.k.a. APM) proactively gauge your codebase or API so they can auto-magically highlight errors, crashes and slow parts that you were missing

**Otherwise:** You might spend great effort on measuring API performance and downtimes, but you’ll probably never be aware of which are your slowest code parts under real-world scenarios and how they affect the UX

🔗 [**Read More: using APM products**](/sections/errorhandling/apmproducts.md)

## ![✔] 2.10 Catch unhandled promise rejections

**TL;DR:** Any exception thrown within a promise will get swallowed and discarded unless a developer remembered to explicitly handle it. Even if your code is subscribed to process.uncaughtException! Overcome this by registering for the process.unhandledRejection event

**Otherwise:** Your errors will get swallowed and leave no trace. Nothing to worry about

🔗 [**Read More: catching unhandled promise rejections**](/sections/errorhandling/catchunhandledpromiserejection.md)

- -## ![✔] 2.11 Fail fast, validate arguments using a dedicated library - -**TL;DR:** This should be part of your Express best practices – Assert API input to avoid nasty bugs that are much harder to track later. Validation code is usually tedious unless using a very cool helper libraries like Joi - -**Otherwise:** Consider this – your function expects a numeric argument “Discount” which the caller forgets to pass, later on your code checks if Discount!=0 (amount of allowed discount is greater than zero), then it will allow the user to enjoy a discount. OMG, what a nasty bug. Can you see it? - -🔗 [**Read More: failing fast**](/sections/errorhandling/failfast.md) - -


- -

⬆ Return to top

- -# `3. Code Style Practices` - -## ![✔] 3.1 Use ESLint - -**TL;DR:** ESLint is the de-facto standard for checking code style, not only to identify nitty-gritty spacing issues but also to detect serious code anti-patterns like developers throwing errors without classification. Using ESLint and following the rest of the code style practices below means following the same styles used by the rest of the community, as well as the same code styles used in the core products themselves. - -**Otherwise:** developers will focus on tedious spacing and line-width concerns - -

## ![✔] 3.2 Node JS Specific Plugins

**TL;DR:** On top of ESLint standard rules that cover vanilla JS only, add Node-specific plugins like [eslint-plugin-node](https://www.npmjs.com/package/eslint-plugin-node), [eslint-plugin-mocha](https://www.npmjs.com/package/eslint-plugin-mocha) and [eslint-plugin-security](https://www.npmjs.com/package/eslint-plugin-security)

**Otherwise:** Many faulty Node.JS code patterns might slip under the radar. For example, developers might require(variableAsPath) files with a variable given as the path, which allows attackers to execute any JS script. Node.JS linters can detect such patterns and complain early

## ![✔] 3.3 Start a Codeblock's Curly Braces in the Same Line

**TL;DR:** The opening curly brace of a code block should be on the same line as the opening statement.

### Code Example
```javascript
  // Do
  function someFunction() {
    // code block
  }

  // Avoid
  function someFunction()
  {
    // code block
  }
```

**Otherwise:** Deviating from this best practice might lead to unexpected results, as can be seen in the Stackoverflow thread below:

🔗 [**Read more:** "Why does a results vary based on curly brace placement?" (Stackoverflow)](https://stackoverflow.com/questions/3641519/why-does-a-results-vary-based-on-curly-brace-placement)

## ![✔] 3.4 Don't Forget the Semicolon

**TL;DR:** While not unanimously agreed upon, it is still recommended to put a semicolon at the end of each statement. This will make your code more readable and explicit to other developers who read it (see the sketch below).

**Otherwise:** As seen in the previous section, JavaScript's interpreter automatically adds a semicolon at the end of a statement if there isn't one, which can lead to some undesired results.
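### Code Example – a minimal sketch of an ASI surprise (variable names are illustrative only)
```javascript
// Do – terminate statements explicitly
const items = [1, 2, 3];
const doubled = items.map((n) => n * 2);
[1, 2, 3].forEach((n) => console.log(n));

// Avoid – without the semicolon, the two statements below are parsed as one expression:
// items.map((n) => n * 2)[4, 5, 6].forEach(...) → TypeError at runtime
const doubledAgain = items.map((n) => n * 2)
[4, 5, 6].forEach((n) => console.log(n))
```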

## ![✔] 3.5 Name Your Functions

**TL;DR:** Name all functions, including closures and callbacks. Avoid anonymous functions. This is especially useful when profiling a node app. Naming all functions will allow you to easily understand what you're looking at when checking a memory snapshot.

**Otherwise:** Debugging production issues using a core dump (memory snapshot) might become challenging as you notice significant memory consumption from functions with no name.
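### Code Example – a small sketch; the function names are hypothetical and only serve to illustrate the difference in a profiler
```javascript
// Avoid – an anonymous callback shows up as "(anonymous)" in stack traces and CPU/memory profiles
setTimeout(function () {
  reconcileOrders();
}, 1000);

// Do – a named function expression is easy to spot in a profiler snapshot
setTimeout(function reconcileOrdersJob() {
  reconcileOrders();
}, 1000);

function reconcileOrders() { /* business logic goes here */ }
```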

## ![✔] 3.6 Naming conventions for variables, constants, functions and classes

**TL;DR:** Use ***lowerCamelCase*** when naming variables and functions, ***UpperCamelCase*** (capital first letter as well) when naming classes and ***UPPERCASE*** for constants. This will help you to easily distinguish between plain variables / functions, and classes that require instantiation. Use descriptive names, but try to keep them short.

**Otherwise:** JavaScript is the only language in the world that allows invoking a constructor ("Class") directly without instantiating it first. Consequently, classes and function-constructors are differentiated by starting with UpperCamelCase.

### Code Example ###
```javascript
  // for class names we use UpperCamelCase
  class SomeClassExample {}

  // for const names we use UPPERCASE
  const CONFIG = {
    key: 'value'
  };

  // for variable and function names we use lowerCamelCase
  let someVariableExample = 'value';
  function doSomething() {}
```

## ![✔] 3.7 Prefer const over let. Ditch the var

**TL;DR:** Using `const` means that once a variable is assigned, it cannot be reassigned. Preferring `const` will help you not be tempted to use the same variable for different uses, and make your code clearer. If a variable needs to be reassigned, in a for loop for example, use `let` to declare it. Another important aspect of `let` is that a variable declared using it is only available in the block scope in which it was defined. `var` is function scoped, not block scoped, and [shouldn't be used in ES6](https://hackernoon.com/why-you-shouldnt-use-var-anymore-f109a58b9b70) now that you have `const` and `let` at your disposal.

**Otherwise:** Debugging becomes way more cumbersome when following a variable that frequently changes.

🔗 [**Read more: JavaScript ES6+: var, let, or const?**](https://medium.com/javascript-scene/javascript-es6-var-let-or-const-ba58b8dcde75)

## ![✔] 3.8 Requires come first, and not inside functions

**TL;DR:** Require modules at the beginning of each file, before and outside of any functions. This simple best practice will not only help you easily and quickly tell the dependencies of a file right at the top, but also avoids a couple of potential problems.

**Otherwise:** Requires are run synchronously by Node JS. If they are called from within a function, it may block other requests from being handled at a more critical time. Also, if a required module or any of its own dependencies throws an error and crashes the server, it is best to find out about it as soon as possible, which might not be the case if that module is required from within a function.
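### Code Example – a minimal sketch; the file paths and function names are illustrative assumptions
```javascript
// Do – all requires at the top, outside any function
const fs = require('fs');
const path = require('path');

function loadTemplate(name) {
  return fs.readFileSync(path.join(__dirname, 'templates', `${name}.html`), 'utf8');
}

// Avoid – requiring inside the function defers the synchronous module load
// to the first call, which may happen while requests are being served
function loadTemplateLazily(name) {
  const lazyFs = require('fs');
  return lazyFs.readFileSync(path.join(__dirname, 'templates', `${name}.html`), 'utf8');
}
```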

## ![✔] 3.9 Do Require on folders, not directly on files

**TL;DR:** When developing a module/library in a folder, place an index.js file that exposes the module's internals so every consumer will pass through it. This serves as an 'interface' to your module and eases future changes without breaking the contract.

**Otherwise:** Changes to the internal structure of files or to the signature may break the interface with clients.

### Code example
```javascript
  // Do
  module.exports.SMSProvider = require('./SMSProvider');
  module.exports.SMSNumberResolver = require('./SMSNumberResolver');

  // Avoid
  module.exports.SMSProvider = require('./SMSProvider/SMSProvider.js');
  module.exports.SMSNumberResolver = require('./SMSNumberResolver/SMSNumberResolver.js');
```

- - -## ![✔] 3.10 Use the `===` operator - -**TL;DR:** Prefer the strict equality operator `===` over the weaker abstract equality operator `==`. `==` will compare two variables after converting them to a common type. There is not type conversion in `===`, and both variables must be of the same type to be equal. - -**Otherwise:** Unequal variables might return true when compared with the `==` operator. - -### Code example -```javascript -'' == '0' // false -0 == '' // true -0 == '0' // true - -false == 'false' // false -false == '0' // true - -false == undefined // false -false == null // false -null == undefined // true - -' \t\r\n ' == 0 // true -``` -All statements above will return false if used with `===` - -

## ![✔] 3.11 Use Async Await, avoid callbacks

**TL;DR:** Node 8 LTS now has full support for async-await. This is a new way of dealing with asynchronous code which supersedes callbacks and promises. Async-await is non-blocking, and it makes asynchronous code look more synchronous. The best gift you can give to your code is using async-await, which provides a much more compact and familiar code syntax such as try-catch.

**Otherwise:** Handling async errors in callback style is probably the fastest way to hell - this style forces you to check errors all over, deal with awkward code nesting and makes it difficult to reason about the code flow.

🔗[**Read more:** Guide to async await 1.0](https://github.com/yortus/asyncawait)
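### Code Example – a minimal sketch; `getUserById` is a hypothetical data-access helper used only for illustration
```javascript
const getUserById = async (id) => ({ id, name: 'Jane' }); // stand-in for a real DB call

// async/await keeps asynchronous code flat and lets try/catch handle errors
async function getUserProfile(userId) {
  try {
    const user = await getUserById(userId);
    return `Hello ${user.name}`;
  } catch (error) {
    console.error('Failed to build profile', error);
    throw error;
  }
}

getUserProfile(1).then(console.log);
```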

## ![✔] 3.12 Use => Arrow Functions

**TL;DR:** Though it's recommended to use async-await and avoid function parameters, when dealing with older APIs that accept promises or callbacks - arrow functions make the code structure more compact and keep the lexical context of the root function (i.e. 'this').

**Otherwise:** Longer code (in ES5 functions) is more prone to bugs and cumbersome to read.

🔗 [**Read more: It’s Time to Embrace Arrow Functions**](https://medium.com/javascript-scene/familiarity-bias-is-holding-you-back-its-time-to-embrace-arrow-functions-3d37e1a9bb75)


- -

⬆ Return to top

# `4. Testing And Overall Quality Practices`

## ![✔] 4.1 At the very least, write API (component) testing

**TL;DR:** Most projects just don't have any automated testing due to short timetables, or often the 'testing project' runs out of control and is abandoned. For that reason, prioritize and start with API testing, which is the easiest to write and provides more coverage than unit testing (you may even craft API tests without code using tools like [Postman](https://www.getpostman.com/)). Afterwards, should you have more resources and time, continue with advanced test types like unit testing, DB testing, performance testing, etc

**Otherwise:** You may spend long days on writing unit tests to find out that you got only 20% system coverage
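### Code Example – a minimal sketch of an API (component) test; it assumes a Mocha runner, the supertest package, and a hypothetical Express app exported from `app.js`
```javascript
const request = require('supertest');
const app = require('../app'); // hypothetical path to your Express app

describe('Orders API (component test)', () => {
  it('returns 200 and a list of orders', async () => {
    const response = await request(app).get('/api/orders').expect(200);
    if (!Array.isArray(response.body)) {
      throw new Error('Expected an array of orders');
    }
  });
});
```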

## ![✔] 4.2 Detect code issues with ESLint + specific Node plugin rules

**TL;DR:** ESLint is the de-facto standard for checking code style, not only to identify nitty-gritty spacing issues but also to detect serious code anti-patterns like developers throwing errors without classification. On top of ESLint standard rules that cover vanilla JS only, add Node-specific plugins like [eslint-plugin-node](https://www.npmjs.com/package/eslint-plugin-node), [eslint-plugin-mocha](https://www.npmjs.com/package/eslint-plugin-mocha) and [eslint-plugin-security](https://www.npmjs.com/package/eslint-plugin-security)

**Otherwise:** Many faulty Node.JS code patterns might slip under the radar. For example, developers might require(variableAsPath) files with a variable given as the path, which allows attackers to execute any JS script. Node.JS linters can detect such patterns and complain early

## ![✔] 4.3 Carefully choose your CI platform (Jenkins vs Rest of the world)

**TL;DR:** Your continuous integration platform (CICD) will host all the quality tools (e.g. test, lint) so it had better come with a vibrant ecosystem of plugins. [Jenkins](https://jenkins.io/) is the default for many projects as it has the biggest community along with a very powerful platform, at the price of a complex setup that demands a steep learning curve. Its rivals, online SaaS like [Travis](https://travis-ci.org/) and [CircleCI](https://circleci.com), are much easier to set up without the burden of managing the whole infrastructure. Eventually, it's a trade-off between robustness and speed - choose your side carefully

**Otherwise:** Choosing some lightweight SaaS vendor might get you blocked once you need some advanced customization. On the other hand, going with Jenkins might burn precious time on infrastructure setup

## ![✔] 4.4 Constantly inspect for vulnerable dependencies

**TL;DR:** Even the most reputable dependencies such as Express have known vulnerabilities. This can get easily tamed using community and commercial tools such as 🔗 [nsp](https://github.com/nodesecurity/nsp) that can be invoked from your CI on every build

**Otherwise:** Keeping your code clean from vulnerabilities without dedicated tools will require you to constantly follow online publications about new threats. Quite tedious

## ![✔] 4.5 Tag your tests

**TL;DR:** Different tests must run on different scenarios: quick smoke, IO-less tests should run when a developer saves or commits a file, full end-to-end tests usually run when a new pull request is submitted, etc. This can be achieved by tagging tests with keywords like #cold #api #sanity so you can grep with your testing harness and invoke the desired subset. For example, this is how you would invoke only the sanity test group with [Mocha](https://mochajs.org/): mocha --grep 'sanity' (see the sketch below)

**Otherwise:** Running all the tests, including tests that perform dozens of DB queries, any time a developer makes a small change can be extremely slow and keeps developers away from running tests
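### Code Example – a minimal sketch, assuming Mocha; the tag names and test names are illustrative
```javascript
// Tags are just part of the test name, so any runner that filters by name can pick them up
describe('Orders service #sanity #api', () => {
  it('adds a new order #sanity', () => {
    // fast, IO-less assertion goes here
  });
});

// Then run only the tagged subset, e.g. with Mocha:
//   mocha --grep "sanity"
```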

## ![✔] 4.6 Check your test coverage, it helps to identify wrong test patterns

**TL;DR:** Code coverage tools like [Istanbul/NYC](https://github.com/gotwarlost/istanbul) are great for 3 reasons: they come for free (no effort is required to benefit from these reports), they help to identify a decrease in testing coverage, and last but not least they highlight testing mismatches: by looking at colored code coverage reports you may notice, for example, code areas that are never tested like catch clauses (meaning that tests only invoke the happy paths and not how the app behaves on errors). Set it to fail builds if the coverage falls under a certain threshold

**Otherwise:** There won't be any automated metric telling you when a large portion of your code is not covered by testing

## ![✔] 4.7 Inspect for outdated packages

**TL;DR:** Use your preferred tool (e.g. 'npm outdated' or [npm-check-updates](https://www.npmjs.com/package/npm-check-updates)) to detect installed packages which are outdated, inject this check into your CI pipeline and even make a build fail in a severe scenario. For example, a severe scenario might be when an installed package lags 5 patch commits behind (e.g. local version is 1.3.1 and repository version is 1.3.8) or is tagged as deprecated by its author - kill the build and prevent deploying this version

**Otherwise:** Your production will run packages that have been explicitly tagged by their author as risky

- -## ![✔] 4.8 Use docker-compose for e2e testing - -**TL;DR:** End to end (e2e) testing which includes live data used to be the weakest link of the CI process as it depends on multiple heavy services like DB. Docker-compose turns this problem into a breeze by crafting production-like environment using a simple text file and easy commands. It allows crafting all the dependent services, DB and isolated network for e2e testing. Last but not least, it can keep a stateless environment that is invoked before each test suite and dies right after - - -**Otherwise:** Without docker-compose teams must maintain a testing DB for each testing environment including developers machines, keep all those DBs in sync so test results won't vary across environments - - -


- -

⬆ Return to top

- -# `5. Going To Production Practices` -## ![✔] 5.1. Monitoring! - -**TL;DR:** Monitoring is a game of finding out issues before our customers do – obviously this should be assigned unprecedented importance. The market is overwhelmed with offers thus consider starting with defining the basic metrics you must follow (my suggestions inside), then go over additional fancy features and choose the solution that tick all boxes. Click ‘The Gist’ below for overview of solutions - -**Otherwise:** Failure === disappointed customers. Simple. - - -🔗 [**Read More: Monitoring!**](/sections/production/monitoring.md) - -

- -## ![✔] 5.2. Increase transparency using smart logging - -**TL;DR:** Logs can be a dumb warehouse of debug statements or the enabler of a beautiful dashboard that tells the story of your app. Plan your logging platform from day 1: how logs are collected, stored and analyzed to ensure that the desired information (e.g. error rate, following an entire transaction through services and servers, etc) can really be extracted - -**Otherwise:** You end-up with a blackbox that is hard to reason about, then you start re-writing all logging statements to add additional information - - -🔗 [**Read More: Increase transparency using smart logging**](/sections/production/smartlogging.md) - -

- -## ![✔] 5.3. Delegate anything possible (e.g. gzip, SSL) to a reverse proxy - -**TL;DR:** Node is awfully bad at doing CPU intensive tasks like gzipping, SSL termination, etc. Instead, use a ‘real’ middleware services like nginx, HAproxy or cloud vendor services - -**Otherwise:** Your poor single thread will keep busy doing networking tasks instead of dealing with your application core and performance will degrade accordingly - - -🔗 [**Read More: Delegate anything possible (e.g. gzip, SSL) to a reverse proxy**](/sections/production/delegatetoproxy.md) - -

## ![✔] 5.4. Lock dependencies

**TL;DR:** Your code must be identical across all environments, but amazingly NPM lets dependencies drift across environments by default – when you install packages in various environments it tries to fetch packages’ latest patch version. Overcome this by using an NPM config file, .npmrc, that tells each environment to save the exact (not the latest) version of each package. Alternatively, for finer grained control use NPM “shrinkwrap”. *Update: as of NPM5, dependencies are locked by default. The new package manager in town, Yarn, also got us covered by default

**Otherwise:** QA will thoroughly test the code and approve a version that will behave differently in production. Even worse, different servers in the same production cluster might run different code

🔗 [**Read More: Lock dependencies**](/sections/production/lockdependencies.md)

## ![✔] 5.5. Guard process uptime using the right tool

**TL;DR:** The process must go on and get restarted upon failures. For simple scenarios, ‘restarter’ tools like PM2 might be enough, but in today's ‘dockerized’ world – cluster management tools should be considered as well

**Otherwise:** Running dozens of instances without a clear strategy and too many tools together (cluster management, docker, PM2) might lead to devops chaos

🔗 [**Read More: Guard process uptime using the right tool**](/sections/production/guardprocess.md)

## ![✔] 5.6. Utilize all CPU cores

**TL;DR:** At its basic form, a Node app runs over a single CPU core while all the others are left idle. It’s your duty to replicate the Node process and utilize all CPUs – for small-medium apps you may use Node Cluster or PM2. For a larger app consider replicating the process using some Docker cluster (e.g. K8S, ECS) or deployment scripts that are based on a Linux init system (e.g. systemd)

**Otherwise:** Your app will likely utilize only 25% of its available resources(!) or even less. Note that a typical server has 4 CPU cores or more; a naive deployment of Node.JS utilizes only 1 (even using PaaS services like AWS beanstalk!)

🔗 [**Read More: Utilize all CPU cores**](/sections/production/utilizecpu.md)
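### Code Example – a minimal sketch using the built-in cluster module; the port and response body are illustrative
```javascript
const cluster = require('cluster');
const http = require('http');
const numCPUs = require('os').cpus().length;

if (cluster.isMaster) {
  // Fork one worker per CPU core and replace workers that die
  for (let i = 0; i < numCPUs; i++) {
    cluster.fork();
  }
  cluster.on('exit', () => cluster.fork());
} else {
  http.createServer((req, res) => res.end(`Handled by worker ${process.pid}`)).listen(3000);
}
```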

## ![✔] 5.7. Create a ‘maintenance endpoint’

**TL;DR:** Expose a set of system-related information, like memory usage and REPL, etc in a secured API. Although it’s highly recommended to rely on standard and battle-tested tools, some valuable information and operations are easier done using code

**Otherwise:** You’ll find that you’re performing many “diagnostic deploys” – shipping code to production only to extract some information for diagnostic purposes

🔗 [**Read More: Create a ‘maintenance endpoint’**](/sections/production/createmaintenanceendpoint.md)
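### Code Example – a small sketch of a secured maintenance route, assuming Express; `requireAdminToken` stands in for whatever real auth you use and the route path is illustrative
```javascript
const express = require('express');
const router = express.Router();

// Placeholder for a real authentication middleware – never expose this endpoint publicly
const requireAdminToken = (req, res, next) => next();

router.get('/maintenance/health', requireAdminToken, (req, res) => {
  res.json({
    uptimeInSeconds: process.uptime(),
    memory: process.memoryUsage(),
    nodeVersion: process.version
  });
});

module.exports = router;
```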

## ![✔] 5.8. Discover errors and downtime using APM products

**TL;DR:** Monitoring and performance products (a.k.a. APM) proactively gauge the codebase and API so they can auto-magically go beyond traditional monitoring and measure the overall user-experience across services and tiers. For example, some APM products can highlight a transaction that loads too slowly on the end-user's side while suggesting the root cause

**Otherwise:** You might spend great effort on measuring API performance and downtimes, but you’ll probably never be aware of which are your slowest code parts under real-world scenarios and how they affect the UX

🔗 [**Read More: Discover errors and downtime using APM products**](/sections/production/apmproducts.md)

- - -## ![✔] 5.9. Make your code production-ready - -**TL;DR:** Code with the end in mind, plan for production from day 1. This sounds a bit vague so I’ve compiled inside (click Gist below) few development tips that are closely related to production maintenance - -**Otherwise:** A world champion IT/devops guy won’t save a system that is badly written - - -🔗 [**Read More: Make your code production-ready**](/sections/production/productoncode.md) - -

## ![✔] 5.10. Measure and guard the memory usage

**TL;DR:** Node.js has a controversial relationship with memory: the v8 engine has soft limits on memory usage (1.4GB) and there are known paths to leak memory in Node’s code – thus watching Node’s process memory is a must. In small apps you may gauge memory periodically using shell commands, but in medium-large apps consider baking your memory watch into a robust monitoring system

**Otherwise:** Your process memory might leak a hundred megabytes a day as happened at Walmart

🔗 [**Read More: Measure and guard the memory usage**](/sections/production/measurememory.md)
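### Code Example – a minimal sketch that periodically logs process memory so an external monitoring agent can pick it up; the interval and log format are illustrative
```javascript
const logMemoryUsage = () => {
  const { rss, heapTotal, heapUsed } = process.memoryUsage();
  const toMb = (bytes) => Math.round(bytes / 1024 / 1024);
  console.log(`memory rss=${toMb(rss)}MB heapTotal=${toMb(heapTotal)}MB heapUsed=${toMb(heapUsed)}MB`);
};

setInterval(logMemoryUsage, 60 * 1000);
```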

## ![✔] 5.11. Get your frontend assets out of Node

**TL;DR:** Serve frontend content using dedicated middleware (nginx, S3, CDN) because Node performance really gets hurt when dealing with many static files due to its single-threaded model

**Otherwise:** Your single Node thread will be kept busy streaming hundreds of html/images/angular/react files instead of allocating all its resources to the task it was born for – serving dynamic content

🔗 [**Read More: Get your frontend assets out of Node**](/sections/production/frontendout.md)

## ![✔] 5.12. Be stateless, kill your Servers almost every day

**TL;DR:** Store any type of data (e.g. user sessions, cache, uploaded files) within external data stores. Consider ‘killing’ your servers periodically or use a ‘serverless’ platform (e.g. AWS Lambda) that explicitly enforces stateless behavior

**Otherwise:** Failure of a given server will result in application downtime instead of just killing a faulty machine. Moreover, scaling-out elasticity will get more challenging due to the reliance on a specific server

🔗 [**Read More: Be stateless, kill your Servers almost every day**](/sections/production/bestateless.md)

## ![✔] 5.13. Use tools that automatically detect vulnerabilities

**TL;DR:** Even the most reputable dependencies such as Express have known vulnerabilities from time to time that put a system at risk. This can get easily tamed using community and commercial tools that constantly check for vulnerabilities and warn (locally or at GitHub), some can even patch them immediately

**Otherwise:** Keeping your code clean from vulnerabilities without dedicated tools will require you to constantly follow online publications about new threats. Quite tedious

🔗 [**Read More: Use tools that automatically detect vulnerabilities**](/sections/production/detectvulnerabilities.md)

## ![✔] 5.14. Assign ‘TransactionId’ to each log statement

**TL;DR:** Assign the same identifier, transaction-id: {some value}, to each log entry within a single request. Then when inspecting errors in logs, easily conclude what happened before and after. Unfortunately, this is not easy to achieve in Node due to its async nature, see code examples inside

**Otherwise:** Looking at a production error log without the context – what happened before – makes it much harder and slower to reason about the issue

🔗 [**Read More: Assign ‘TransactionId’ to each log statement**](/sections/production/assigntransactionid.md)
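### Code Example – a simplified sketch for Express; in real code the id is usually propagated automatically (e.g. via continuation-local-storage) rather than passed by hand, and the header name here is an assumption
```javascript
const crypto = require('crypto');

// Attach a transaction id to each incoming request
const assignTransactionId = (req, res, next) => {
  req.transactionId = req.headers['x-transaction-id'] || crypto.randomBytes(8).toString('hex');
  next();
};

// Include the transaction id in every log line related to this request
const logWithTransactionId = (req, message) => {
  console.log(JSON.stringify({ transactionId: req.transactionId, message }));
};

// app.use(assignTransactionId); // hypothetical Express app wiring
```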

## ![✔] 5.15. Set NODE_ENV=production

**TL;DR:** Set the environment variable NODE_ENV to ‘production’ or ‘development’ to flag whether production optimizations should get activated – many NPM packages determine the current environment and optimize their code for production

**Otherwise:** Omitting this simple property might greatly degrade performance. For example, when using Express for server-side rendering, omitting NODE_ENV makes it slower by a factor of three!

🔗 [**Read More: Set NODE_ENV=production**](/sections/production/setnodeenv.md)
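### Code Example – a minimal sketch of reading the flag in your own code; the configuration keys are illustrative only
```javascript
const isProduction = process.env.NODE_ENV === 'production';

// Hypothetical configuration switch driven by the environment
const config = {
  viewCache: isProduction,
  logLevel: isProduction ? 'warn' : 'debug'
};

console.log(`Running with NODE_ENV=${process.env.NODE_ENV || 'undefined'}`, config);
```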

## ![✔] 5.16. Design automated, atomic and zero-downtime deployments

**TL;DR:** Research shows that teams who perform many deployments lower the probability of severe production issues. Fast and automated deployments that don’t require risky manual steps and service downtime significantly improve the deployment process. You should probably achieve this using Docker combined with CI tools, as they have become the industry standard for streamlined deployment

**Otherwise:** Long deployments -> production downtime & human-related errors -> a team unconfident in making deployments -> fewer deployments and features


- -

⬆ Return to top

# `Security Practices`

## Our contributors are working on this section, would you like to join?


# `Performance Practices`

## Our contributors are working on this section, would you like to join?


# Contributors

## `Yoni Goldberg`
Developer & consultant, Backend expert, JavaScript enthusiast, focused on Node.JS. Many of the bullets were first published in his blog post [http://www.goldbergyoni.com](http://www.goldbergyoni.com)

## `Ido Richter`
👨‍💻 Software engineer, 🌐 web developer, 🤖 emojis enthusiast.
diff --git a/translations/spanish/sections/errorhandling/apmproducts.md b/translations/spanish/sections/errorhandling/apmproducts.md
deleted file mode 100644
index 9dc41475a..000000000
--- a/translations/spanish/sections/errorhandling/apmproducts.md
+++ /dev/null
@@ -1,29 +0,0 @@
# Discover errors and downtime using APM products

### One Paragraph Explainer

Exception != Error. Traditional error handling assumes the existence of Exceptions, but application errors might come in the form of slow code paths, API downtime, lack of computational resources and more. This is where APM products come in handy, as they allow you to detect a wide variety of ‘buried’ issues proactively with minimal setup. Among the common features of APM products are: alerting when an HTTP API returns errors, detecting when the API response time drops below some threshold, detecting ‘code smells’, monitoring server resources, an operational intelligence dashboard with IT metrics, and many other useful features. Most vendors offer a free plan.

### Wikipedia about APM

In the fields of information technology and systems management, Application Performance Management (APM) is the monitoring and management of performance and availability of software applications. APM strives to detect and diagnose complex application performance problems to maintain an expected level of service. APM is “the translation of IT metrics into business meaning ([i.e.] value)”

### Understanding the APM marketplace

APM products comprise 3 major segments:

1. Website or API monitoring – external services that constantly monitor uptime and performance via HTTP requests. Can be set up in a few minutes. A few selected contenders: Pingdom, Uptime Robot, and New Relic

2. Code instrumentation – a product family that requires embedding an agent within the application to benefit from features like slow code detection, exception statistics, performance monitoring and many more. A few selected contenders: New Relic, App Dynamics

3. Operational intelligence dashboard – this line of products focuses on facilitating the ops team with metrics and curated content that help them easily stay on top of application performance. This usually involves aggregating multiple sources of information (application logs, DB logs, server logs, etc.) and upfront dashboard design work.
A few selected contenders: Datadog, Splunk

### Example: UpTimeRobot.Com – Website monitoring dashboard
![alt text](https://github.com/i0natan/nodebestpractices/blob/master/assets/images/uptimerobot.jpg "Website monitoring dashboard")

### Example: AppDynamic.Com – end to end monitoring combined with code instrumentation
![alt text](https://github.com/i0natan/nodebestpractices/blob/master/assets/images/app-dynamics-dashboard.png "end to end monitoring combined with code instrumentation")
\ No newline at end of file
diff --git a/translations/spanish/sections/errorhandling/asyncerrorhandling.md b/translations/spanish/sections/errorhandling/asyncerrorhandling.md
deleted file mode 100644
index f1f638523..000000000
--- a/translations/spanish/sections/errorhandling/asyncerrorhandling.md
+++ /dev/null
@@ -1,56 +0,0 @@
# Use Async-Await or promises for async error handling

### One Paragraph Explainer

Callbacks don’t scale well since they are not familiar to most programmers, force you to check errors all over, deal with nasty code nesting and make it difficult to reason about the code flow. Promise libraries like BlueBird, async, and Q pack a standard code style using RETURN and THROW to control the program flow. Specifically, they support the favorite try-catch error handling style which allows freeing the main code path from dealing with errors in every function

### Code Example – using promises to catch errors

```javascript
doWork()
  .then(doWork)
  .then(doOtherWork)
  .then((result) => doWork(result))
  .catch((error) => { throw error; })
  .then(verify);
```

### Anti pattern code example – callback style error handling

```javascript
getData(someParameter, function(err, result){
    if(err != null)
      //do something like calling the given callback function and pass the error
    getMoreData(a, function(err, result){
          if(err != null)
            //do something like calling the given callback function and pass the error
        getMoreData(b, function(c){
            getMoreData(d, function(e){
                if(err != null)
                    //you get the idea?
            });
        });
```

### Blog Quote: "We have a problem with promises" - From the blog pouchdb.com

 > ……And in fact, callbacks do something even more sinister: they deprive us of the stack, which is something we usually take for granted in programming languages. Writing code without a stack is a lot like driving a car without a brake pedal: you don’t realize how badly you need it, until you reach for it and it’s not there. The whole point of promises is to give us back the language fundamentals we lost when we went async: return, throw, and the stack. But you have to know how to use promises correctly in order to take advantage of them.

### Blog Quote: "The promises method is much more compact" - From the blog gosquared.com

 > ………The promises method is much more compact, clearer and quicker to write. If an error or exception occurs within any of the ops it is handled by the single .catch() handler. Having this single place to handle all errors means you don’t need to write error checking for each stage of the work.

### Blog Quote: "Promises are native ES6, can be used with generators" - From the blog StrongLoop

 > ….Callbacks have a lousy error-handling story. Promises are better. Marry the built-in error handling in Express with promises and significantly lower the chances of an uncaught exception.
Promises are native ES6, can be used with generators, and ES7 proposals like async/await through compilers like Babel - -### Blog Quote: "All those regular flow control constructs you are used to are completely broken" -From the blog Benno’s - - > ……One of the best things about asynchronous, callback based programming is that basically all those regular flow control constructs you are used to are completely broken. However, the one I find most broken is the handling of exceptions. Javascript provides a fairly familiar try…catch construct for dealing with exceptions. The problems with exceptions is that they provide a great way of short-cutting errors up a call stack, but end up being completely useless of the error happens on a different stack… diff --git a/translations/spanish/sections/errorhandling/catchunhandledpromiserejection.md b/translations/spanish/sections/errorhandling/catchunhandledpromiserejection.md deleted file mode 100644 index 94dc9e7d1..000000000 --- a/translations/spanish/sections/errorhandling/catchunhandledpromiserejection.md +++ /dev/null @@ -1,58 +0,0 @@ -# Catch unhandled promise rejections -

### One Paragraph Explainer

Typically, most modern Node.JS/Express application code runs within promises – whether within the .then handler, a function callback or in a catch block. Surprisingly, unless a developer remembered to add a .catch clause, errors thrown in these places simply disappear – they are not even caught by a process.on('uncaughtException') subscription. Recent versions of Node print a warning message when an unhandled rejection pops up, which might help you notice when things go wrong, but it's obviously not proper error handling. The straightforward solution is to never forget adding a .catch clause within each promise chain call and to redirect to a centralized error handler. However, building your error handling strategy only on developers’ discipline is somewhat fragile. Consequently, it’s highly recommended to use a graceful fallback and subscribe to process.on(‘unhandledRejection’, callback) – this will ensure that any promise error, if not handled locally, will get its treatment.

- -### Code example: these errors will not get caught by any error handler (except unhandledRejection) - -```javascript -DAL.getUserById(1).then((johnSnow) => -{ - //this error will just vanish - if(johnSnow.isAlive == false) - throw new Error('ahhhh'); -}); - -``` -

-### Code example: Catching unresolved and rejected promises - -```javascript -process.on('unhandledRejection', function (reason, p) { - //I just caught an unhandled promise rejection, since we already have fallback handler for unhandled errors (see below), let throw and let him handle that - throw reason; -}); -process.on('uncaughtException', function (error) { - //I just received an error that was never handled, time to handle it and then decide whether a restart is needed - errorManagement.handler.handleError(error); - if (!errorManagement.handler.isTrustedError(error)) - process.exit(1); -}); - -``` -

### Blog Quote: "If you can make a mistake, at some point you will" - From the blog James Nelson

 > Let’s test your understanding. Which of the following would you expect to print an error to the console?

Promise.resolve(‘promised value’).then(function() {
throw new Error(‘error’);
});

Promise.reject(‘error value’).catch(function() {
throw new Error(‘error’);
});

new Promise(function(resolve, reject) {
throw new Error(‘error’);
});

I don’t know about you, but my answer is that I’d expect all of them to print an error. However, the reality is that a number of modern JavaScript environments won’t print errors for any of them. The problem with being human is that if you can make a mistake, at some point you will. Keeping this in mind, it seems obvious that we should design things in such a way that mistakes hurt as little as possible, and that means handling errors by default, not discarding them

diff --git a/translations/spanish/sections/errorhandling/centralizedhandling.md b/translations/spanish/sections/errorhandling/centralizedhandling.md
deleted file mode 100644
index d9c5db3d8..000000000
--- a/translations/spanish/sections/errorhandling/centralizedhandling.md
+++ /dev/null
@@ -1,83 +0,0 @@
# Handle errors centrally, through but not within middleware

### One Paragraph Explainer

Without one dedicated object for error handling, the chances are greater that important errors will hide under the radar due to improper handling. The error handler object is responsible for making the error visible, for example by writing to a well-formatted logger, sending events to some monitoring product or emailing the admin directly. A typical error flow might be: some module throws an error -> the API router catches the error -> it propagates the error to the middleware (e.g. Express, KOA) which is responsible for catching errors -> a centralized error handler is called -> the middleware is told whether this error is an untrusted error (not operational) so it can restart the app gracefully. Note that it’s a common, yet wrong, practice to handle errors within Express middleware – doing so will not cover errors that are thrown in non-web interfaces

### Code Example – a typical error flow

```javascript
//DAL layer, we don't handle errors here
DB.addDocument(newCustomer, (error, result) => {
    if (error)
        throw new Error("Great error explanation comes here"); //attach other useful properties to the error here
});

//API route code, we catch both sync and async errors and forward to the middleware
try {
    customerService.addNew(req.body).then(function (result) {
        res.status(200).json(result);
    }).catch((error) => {
        next(error)
    });
}
catch (error) {
    next(error);
}

//Error handling middleware, we delegate the handling to the centralized error handler
app.use(function (err, req, res, next) {
    errorHandler.handleError(err).then((isOperationalError) => {
        if (!isOperationalError)
            next(err);
    });
});

```

### Code example – handling errors within a dedicated object

```javascript
module.exports.handler = new errorHandler();

function errorHandler(){
    this.handleError = function (error) {
        return logger.logError(err).then(sendMailToAdminIfCritical).then(saveInOpsQueueIfCritical).then(determineIfOperationalError);
    }
}

```

### Code Example – Anti Pattern: handling errors within the middleware

```javascript
//middleware handling the error directly, who will handle Cron jobs and testing errors?
-app.use(function (err, req, res, next) { - logger.logError(err); - if(err.severity == errors.high) - mailer.sendMail(configuration.adminMail, "Critical error occured", err); - if(!err.isOperational) - next(err); -}); - -``` - -### Blog Quote: "Sometimes lower levels can’t do anything useful except propagate the error to their caller" - From the blog Joyent, ranked 1 for the keywords “Node.JS error handling” - - > …You may end up handling the same error at several levels of the stack. This happens when lower levels can’t do anything useful except propagate the error to their caller, which propagates the error to its caller, and so on. Often, only the top-level caller knows what the appropriate response is, whether that’s to retry the operation, report an error to the user, or something else. But that doesn’t mean you should try to report all errors to a single top-level callback, because that callback itself can’t know in what context the error occurred… - - -### Blog Quote: "Handling each err individually would result in tremendous duplication" - From the blog JS Recipes, ranked 17 for the keywords “Node.JS error handling” - - > ……In Hackathon Starter api.js controller alone, there are over 79 occurences of error objects. Handling each err individually would result in tremendous amount of code duplication. The next best thing you can do is to delegate all error handling logic to an Express middleware… - - -### Blog Quote: "HTTP errors have no place in your database code" - From the blog Daily JS, ranked 14 for the keywords “Node.JS error handling” - - > ……You should set useful properties in error objects, but use such properties consistently. And, don’t cross the streams: HTTP errors have no place in your database code. Or for browser developers, Ajax errors have a place in code that talks to the server, but not code that processes Mustache templates… - diff --git a/translations/spanish/sections/errorhandling/documentingusingswagger.md b/translations/spanish/sections/errorhandling/documentingusingswagger.md deleted file mode 100644 index 6a662aa6f..000000000 --- a/translations/spanish/sections/errorhandling/documentingusingswagger.md +++ /dev/null @@ -1,15 +0,0 @@ -# Document API errors using Swagger - - -### One Paragraph Explainer - -REST APIs return results using HTTP code, it’s absolutely required for the API user to be aware not only about the API schema but also about potential errors – the caller may then catch an error and tactfully handle it. For example, your API documentation might state in advanced that HTTP status 409 is returned when the customer name already exist (assuming the API register new users) so the caller can correspondingly render the best UX for the given situation. Swagger is a standard that defines the schema of API documentation with eco-system of tools that allow creating documentation easily online, see prtint screens below - -### Blog Quote: "You have to tell your callers what errors can happen" -From the blog Joyent, ranked 1 for the keywords “Node.JS logging” - - > We’ve talked about how to handle errors, but when you’re writing a new function, how do you deliver errors to the code that called your function? …If you don’t know what errors can happen or don’t know what they mean, then your program cannot be correct except by accident. 
So if you’re writing a new function, you have to tell your callers what errors can happen and what they mean… - - - ### Useful Tool: Swagger Online Documentation Creator -![alt text](https://github.com/i0natan/nodebestpractices/blob/master/assets/images/swaggerDoc.png "API error handling") \ No newline at end of file diff --git a/translations/spanish/sections/errorhandling/failfast.md b/translations/spanish/sections/errorhandling/failfast.md deleted file mode 100644 index 07c24e64f..000000000 --- a/translations/spanish/sections/errorhandling/failfast.md +++ /dev/null @@ -1,50 +0,0 @@ -# Fail fast, validate arguments using a dedicated library - - -### One Paragraph Explainer - -We all know how checking arguments and failing fast is important to avoid hidden bugs (see anti-pattern code example below). If not, read about explicit programming and defensive programming. In reality, we tend to avoid it due to the annoyance of coding it (e.g. think of validating hierarchical JSON object with fields like email and dates) – libraries like Joi and Validator turns this tedious task into a breeze. - -### Wikipedia: Defensive Programming - -Defensive programming is an approach to improve software and source code, in terms of: General quality – reducing the number of software bugs and problems. Making the source code comprehensible – the source code should be readable and understandable so it is approved in a code audit. Making the software behave in a predictable manner despite unexpected inputs or user actions. - - - -### Code example: validating complex JSON input using ‘Joi’ - -```javascript -var memberSchema = Joi.object().keys({ - password: Joi.string().regex(/^[a-zA-Z0-9]{3,30}$/), - birthyear: Joi.number().integer().min(1900).max(2013), - email: Joi.string().email() -}); - -function addNewMember(newMember) -{ - //assertions comes first - if(Joi.validate(newMember), memberSchema, (err, value) => throw Error("Invalid input)); - //other logic here -} - -``` - -### Anti-pattern: no validation yields nasty bugs - -```javascript -//if the discount is positive let's then redirect the user to pring his discount coupons -function redirectToPrintDiscount(httpResponse, member, discount) -{ - if(discount != 0) - httpResponse.redirect(`/discountPrintView/${member.id}`); -} - -redirectToPrintDiscount(httpResponse, someMember); -//forgot to pass the parameter discount, why the heck was the user redirected to the discount screen? - -``` - -### Blog Quote: "You should throw these errors immediately" - From the blog: Joyent - - > A degenerate case is where someone calls an asynchronous function but doesn’t pass a callback. You should throw these errors immediately, since the program is broken and the best chance of debugging it involves getting at least a stack trace and ideally a core file at the point of the error. To do this, we recommend validating the types of all arguments at the start of the function. \ No newline at end of file diff --git a/translations/spanish/sections/errorhandling/monitoring.md b/translations/spanish/sections/errorhandling/monitoring.md deleted file mode 100644 index 5f9f983ad..000000000 --- a/translations/spanish/sections/errorhandling/monitoring.md +++ /dev/null @@ -1,18 +0,0 @@ -# Title - - -### One Paragraph Explainer - -> At the very basic level, monitoring means you can *easily identify when bad things happen at production. For example, by getting notified by email or Slack. 
The challenge is to choose the right set of tools that will satisfy your requirements without breaking your bank. May I suggest, start with defining the core set of metrics that must be watched to ensure a healthy state – CPU, server RAM, Node process RAM (less than 1.4GB), the amount of errors in the last minute, number of process restarts, average response time. Then go over some advanced features you might fancy and add to your wish list. Some examples of luxury monitoring feature: DB profiling, cross-service measuring (i.e. measure business transaction), frontend integration, expose raw data to custom BI clients, Slack notifications and many others. - -Achieving the advanced features demands lengthy setup or buying a commercial product such as Datadog, newrelic and a like. Unfortunately, achieving also the basics is not a walk in the park as some metrics are hardware-related (CPU) and others live within the node process (internal errors) thus All the straightforward tools require some additional setup. For example, cloud vendor monitoring solutions (e.g. AWS CloudWatch, Google StackDriver) will tell you immediately about the hardware metric but nothing about the internal app behavior. On the other end, Log-based solutions such as ElaticSeatch lack by default the hardware view. The solution is to augment your choice with missing metrics, for example, a popular choice is sending application logs to Elastic stack and configure some additional agent (e.g. Beat) to share with it hardware-related information to get the full picture. - - -### Blog Quote: "We have a problem with promises" - From the blog pouchdb.com, ranked 11 for the keywords “Node Promises” - - > … We recommend you to watch these signals for all of your services: Error Rate: Because errors are user facing and immediately affect your customers. -Response time: Because the latency directly affects your customers and business. -Throughput: The traffic helps you to understand the context of increased error rates and the latency too. -Saturation: It tells how “full” your service is. If the CPU usage is 90%, can your system handle more traffic? -… diff --git a/translations/spanish/sections/errorhandling/operationalvsprogrammererror.md b/translations/spanish/sections/errorhandling/operationalvsprogrammererror.md deleted file mode 100644 index 6eee0ee84..000000000 --- a/translations/spanish/sections/errorhandling/operationalvsprogrammererror.md +++ /dev/null @@ -1,51 +0,0 @@ -# Distinguish operational vs programmer errors - -### One Paragraph Explainer - -Distinguishing the following two error types will minimize your app downtime and helps avoid crazy bugs: Operational errors refer to situations where you understand what happened and the impact of it – for example, a query to some HTTP service failed due to connection problem. On the other hand, programmer errors refer to cases where you have no idea why and sometimes where an error came from – it might be some code that tried to read undefined value or DB connection pool that leaks memory. Operational errors are relatively easy to handle – usually logging the error is enough. 
Things become hairy when a programmer error pop-up, the application might be in an inconsistent state and there’s nothing better you can do than restart gracefully - - - -### Code Example – marking an error as operational (trusted) - -```javascript -//marking an error object as operational -var myError = new Error("How can I add new product when no value provided?"); -myError.isOperational = true; - -//or if you're using some centralized error factory (see other examples at the bullet "Use only the built-in Error object") -function appError(commonType, description, isOperational) { - Error.call(this); - Error.captureStackTrace(this); - this.commonType = commonType; - this.description = description; - this.isOperational = isOperational; -}; - -throw new appError(errorManagement.commonErrors.InvalidInput, "Describe here what happened", true); - -``` - -### Blog Quote: "Programmer errors are bugs in the program" -From the blog Joyent, ranked 1 for the keywords “Node.JS error handling” - - > …The best way to recover from programmer errors is to crash immediately. You should run your programs using a restarter that will automatically restart the program in the event of a crash. With a restarter in place, crashing is the fastest way to restore reliable service in the face of a transient programmer error… - - ### Blog Quote: "No safe way to leave without creating some undefined brittle state" -From Node.JS official documentation - - > …By the very nature of how throw works in JavaScript, there is almost never any way to safely “pick up where you left off”, without leaking references, or creating some other sort of undefined brittle state. The safest way to respond to a thrown error is to shut down the process. Of course, in a normal web server, you might have many connections open, and it is not reasonable to abruptly shut those down because an error was triggered by someone else. The better approach is to send an error response to the request that triggered the error, while letting the others finish in their normal time, and stop listening for new requests in that worker. - - - ### Blog Quote: "Otherwise you risk the state of your application" -From the blog debugable.com, ranked 3 for the keywords “Node.JS uncaught exception” - - > …So, unless you really know what you are doing, you should perform a graceful restart of your service after receiving an “uncaughtException” exception event. Otherwise you risk the state of your application, or that of 3rd party libraries to become inconsistent, leading to all kinds of crazy bugs… - - ### Blog Quote: "Blog Quote: There are three schools of thoughts on error handling" -From the blog: JS Recipes - - > …There are primarily three schools of thoughts on error handling: -1. Let the application crash and restart it. -2. Handle all possible errors and never crash. -3. Balanced approach between the two diff --git a/translations/spanish/sections/errorhandling/shuttingtheprocess.md b/translations/spanish/sections/errorhandling/shuttingtheprocess.md deleted file mode 100644 index dc488c6df..000000000 --- a/translations/spanish/sections/errorhandling/shuttingtheprocess.md +++ /dev/null @@ -1,54 +0,0 @@ -# Shut the process gracefully when a stranger comes to town - - -### One Paragraph Explainer - -Somewhere within your code, an error handler object is responsible for deciding how to proceed when an error comes in – if the error is trusted (i.e. operational error, see further explanation within best practice #3) then writing to log file might be enough. 
Things get hairy if the error is not familiar – this means that some component might be in a fault state and all future requests are subject to failure. For example, assuming a singleton, stateful token issuer service that threw an exception and lost its state – from now it might behave unexpectedly and cause all requests to fail. Under this scenario, kill the process and use a ‘Restarter tool’ (like Forever, PM2, etc) to start with a clean slate. - - - -### Code example: deciding whether to crash - -```javascript -//deciding whether to crash when an uncaught exception arrives -//Assuming developers mark known operational errors with error.isOperational=true, read best practice #3 -process.on('uncaughtException', function(error) { - errorManagement.handler.handleError(error); - if(!errorManagement.handler.isTrustedError(error)) - process.exit(1) -}); - - -//centralized error handler encapsulates error-handling related logic -function errorHandler(){ - this.handleError = function (error) { - return logger.logError(err).then(sendMailToAdminIfCritical).then(saveInOpsQueueIfCritical).then(determineIfOperationalError); - } - - this.isTrustedError = function(error) - { - return error.isOperational; - } - -``` - - -### Blog Quote: "The best way is to crash" - FFrom the blog Joyent - - > …The best way to recover from programmer errors is to crash immediately. You should run your programs using a restarter that will automatically restart the program in the event of a crash. With a restarter in place, crashing is the fastest way to restore reliable service in the face of a transient programmer error… - - -### Blog Quote: "There are three schools of thoughts on error handling" - From the blog: JS Recipes - - > …There are primarily three schools of thoughts on error handling: -1. Let the application crash and restart it. -2. Handle all possible errors and never crash. -3. Balanced approach between the two - - -### Blog Quote: "No safe way to leave without creating some undefined brittle state" -From Node.JS official documentation - - > …By the very nature of how throw works in JavaScript, there is almost never any way to safely “pick up where you left off”, without leaking references, or creating some other sort of undefined brittle state. The safest way to respond to a thrown error is to shut down the process. Of course, in a normal web server, you might have many connections open, and it is not reasonable to abruptly shut those down because an error was triggered by someone else. The better approach is to send an error response to the request that triggered the error, while letting the others finish in their normal time, and stop listening for new requests in that worker. \ No newline at end of file diff --git a/translations/spanish/sections/errorhandling/testingerrorflows.md b/translations/spanish/sections/errorhandling/testingerrorflows.md deleted file mode 100644 index 0684cab7f..000000000 --- a/translations/spanish/sections/errorhandling/testingerrorflows.md +++ /dev/null @@ -1,37 +0,0 @@ -# Test error flows using your favorite test framework - - -### One Paragraph Explainer - -Testing ‘happy’ paths is no better than testing failures. Good testing code coverage demands to test exceptional paths. Otherwise, there is no trust that exceptions are indeed handled correctly. Every unit testing framework, like Mocha & Chai, has a support for exception testing (code examples below). If you find it tedious to test every inner function and exception – you may settle with testing only REST API HTTP errors. 
- - - -### Code example: ensuring the right exception is thrown using Mocha & Chai - -```javascript -describe("Facebook chat", () => { - it("Notifies on new chat message", () => { - var chatService = new chatService(); - chatService.participants = getDisconnectedParticipants(); - expect(chatService.sendMessage.bind({message: "Hi"})).to.throw(ConnectionError); - }); -}); - -``` - -### Code example: ensuring API returns the right HTTP error code - -```javascript -it("Creates new Facebook group", function (done) { - var invalidGroupInfo = {}; - httpRequest({method: 'POST', uri: "facebook.com/api/groups", resolveWithFullResponse: true, body: invalidGroupInfo, json: true - }).then((response) => { - //oh no if we reached here than no exception was thrown - }).catch(function (response) { - expect(400).to.equal(response.statusCode); - done(); - }); - }); - -``` \ No newline at end of file diff --git a/translations/spanish/sections/errorhandling/usematurelogger.md b/translations/spanish/sections/errorhandling/usematurelogger.md deleted file mode 100644 index 22b206eb0..000000000 --- a/translations/spanish/sections/errorhandling/usematurelogger.md +++ /dev/null @@ -1,51 +0,0 @@ -# Use a mature logger to increase errors visibility - - -### One Paragraph Explainer - -We all loovve console.log but obviously a reputable and persisted Logger like Winston, Bunyan or L4JS is mandatory for serious projects. A set of practices and tools will help to reason about errors much quicker – (1) log frequently using different levels (debug, info, error), (2) when logging, provide contextual information as JSON objects, see example below. (3) watch and filter logs using a log querying API (built-in in most loggers) or a log viewer software -(4) Expose and curate log statement for the operation team using operational intelligence tool like Splunk - - - -### Code Example – Winston Logger in action - -```javascript -//your centralized logger object -var logger = new winston.Logger({ - level: 'info', - transports: [ - new (winston.transports.Console)(), - new (winston.transports.File)({ filename: 'somefile.log' }) - ] - }); - -//custom code somewhere using the logger -logger.log('info', 'Test Log Message with some parameter %s', 'some parameter', { anything: 'This is metadata' }); - -``` - -### Code Example – Querying the log folder (searching for entries) - -```javascript -var options = { - from: new Date - 24 * 60 * 60 * 1000, until: new Date, limit: 10, start: 0, - order: 'desc', fields: ['message'] - }; - - - // Find items logged between today and yesterday. - winston.query(options, function (err, results) { - //callback with results - }); - -``` - -### Blog Quote: "Logger Requirements" - From the blog Strong Loop - - > Lets identify a few requirements (for a logger): -1. Time stamp each log line. This one is pretty self explanatory – you should be able to tell when each log entry occured. -2. Logging format should be easily digestible by humans as well as machines. -3. Allows for multiple configurable destination streams. 
For example, you might be writing trace logs to one file but when an error is encountered, write to the same file, then into error file and send an email at the same time… - \ No newline at end of file diff --git a/translations/spanish/sections/errorhandling/useonlythebuiltinerror.md b/translations/spanish/sections/errorhandling/useonlythebuiltinerror.md deleted file mode 100644 index deb372b10..000000000 --- a/translations/spanish/sections/errorhandling/useonlythebuiltinerror.md +++ /dev/null @@ -1,78 +0,0 @@ -# Use only the built-in Error object - - -### One Paragraph Explainer - -The permissive nature of JS along with its variety code-flow options (e.g. EventEmitter, Callbacks, Promises, etc) pushes to great variance in how developers raise errors – some use strings, other define their own custom types. Using Node.JS built-in Error object helps to keep uniformity within your code and with 3rd party libraries, it also preserves significant information like the StackTrace. When raising the exception, it’s usually a good practice to fill it with additional contextual properties like the error name and the associated HTTP error code. To achieve this uniformity and practices, consider extending the Error object with additional properties, see code example below -Blog Quote: “I don’t see the value in having lots of different types” -From the blog Ben Nadel, ranked 5 for the keywords “Node.JS error object” -…”Personally, I don’t see the value in having lots of different types of error objects – JavaScript, as a language, doesn’t seem to cater to Constructor-based error-catching. As such, differentiating on an object property seems far easier than differentiating on a Constructor type… - - - -### Code Example – doing it right - -```javascript -//throwing an Error from typical function, whether sync or async - if(!productToAdd) - throw new Error("How can I add new product when no value provided?"); - -//'throwing' an Error from EventEmitter -const myEmitter = new MyEmitter(); -myEmitter.emit('error', new Error('whoops!')); - -//'throwing' an Error from a Promise - return new promise(function (resolve, reject) { - Return DAL.getProduct(productToAdd.id).then((existingProduct) =>{ - if(existingProduct != null) - reject(new Error("Why fooling us and trying to add an existing product?")); - -``` - -### Code example – Anti Pattern - -```javascript -//throwing a String lacks any stack trace information and other important properties -if(!productToAdd) - throw ("How can I add new product when no value provided?"); - -``` - -### Code example – doing it even better - -```javascript -//centralized error object that derives from Node’s Error -function appError(name, httpCode, description, isOperational) { - Error.call(this); - Error.captureStackTrace(this); - this.name = name; - //...other properties assigned here -}; - -appError.prototype.__proto__ = Error.prototype; - -module.exports.appError = appError; - -//client throwing an exception -if(user == null) - throw new appError(commonErrors.resourceNotFound, commonHTTPErrors.notFound, "further explanation", true) -``` - - -### Blog Quote: "A string is not an error" -From the blog devthought.com, ranked 6 for the keywords “Node.JS error object” - - > …passing a string instead of an error results in reduced interoperability between modules. It breaks contracts with APIs that might be performing instanceof Error checks, or that want to know more about the error. 
Error objects, as we’ll see, have very interesting properties in modern JavaScript engines besides holding the message passed to the constructor… -Blog Quote: “All JavaScript and System errors raised by Node.js inherit from Error” -From Node.JS official documentation -…All JavaScript and System errors raised by Node.js inherit from, or are instances of, the standard JavaScript Error class and are guaranteed to provide at least the properties available on that class. A generic JavaScript Error object that does not denote any specific circumstance of why the error occurred. Error objects capture a “stack trace” detailing the point in the code at which the Error was instantiated, and may provide a text description of the error.All errors generated by Node.js, including all System and JavaScript errors, will either be instances of, or inherit from, the Error class… - -### Blog Quote: "Inheriting from Error doesn’t add too much value" -From the blog machadogj - - > …One problem that I have with the Error class is that is not so simple to extend. Of course you can inherit the class and create your own Error classes like HttpError, DbError, etc. However that takes time, and doesn’t add too much value unless you are doing something with types. Sometimes, you just want to add a message, and keep the inner error, and sometimes you might want to extend the error with parameters, and such… - - ### Blog Quote: "All JavaScript and System errors raised by Node.js inherit from Error" -From Node.JS official documentation - - > ……All JavaScript and System errors raised by Node.js inherit from, or are instances of, the standard JavaScript Error class and are guaranteed to provide at least the properties available on that class. A generic JavaScript Error object that does not denote any specific circumstance of why the error occurred. Error objects capture a “stack trace” detailing the point in the code at which the Error was instantiated, and may provide a text description of the error.All errors generated by Node.js, including all System and JavaScript errors, will either be instances of, or inherit from, the Error class… diff --git a/translations/spanish/sections/production/apmproducts.md b/translations/spanish/sections/production/apmproducts.md deleted file mode 100644 index 9d464f1c7..000000000 --- a/translations/spanish/sections/production/apmproducts.md +++ /dev/null @@ -1,27 +0,0 @@ -# Sure user experience with APM products - -

- - -### One Paragraph Explainer - -APM (application performance monitoring) refers to a family of products that aim to monitor application performance end to end, also from the customer's perspective. While traditional monitoring solutions focus on exceptions and standalone technical metrics (e.g. error tracking, slow server endpoints, etc.), in the real world our app might create disappointed users without throwing any code exception, for example if some middleware service performs very slowly. APM products measure the user experience end to end; for example, given a system that encompasses a frontend UI and multiple distributed services, some APM products can tell how long a transaction that spans multiple tiers lasts. They can tell whether the user experience is solid and point to the problem. This attractive offering comes with a relatively high price tag, hence it's recommended for large-scale and complex products that need to go beyond straightforward monitoring. - -
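### Code example (illustrative sketch): hooking an APM agent into a Node app

Most commercial APM products work by loading a lightweight agent before the rest of the application. The snippet below is only a hedged illustration of that pattern – the package name, the dashboard and the configuration all depend on the vendor you pick and are not prescribed by this guide.

```javascript
// Load the vendor's agent before anything else so it can instrument
// http, database drivers, etc. The module name is vendor-specific.
require('newrelic'); // assumption: using New Relic's Node agent; other vendors differ

const express = require('express');
const app = express();

app.get('/checkout', (req, res) => {
  // The agent times this transaction end to end and reports it
  // to the vendor's dashboard without any extra code here.
  res.send('ok');
});

app.listen(3000);
```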

- - -### APM example – a commercial product that visualize cross-service app performance - -![APM example](/assets/images/apm1.png "APM example") - -

- -### APM example – a commercial product that emphasize the user experience score - -![APM example](/assets/images/apm2.png "APM example") - -

- -### APM example – a commercial product that highlights slow code paths - -![APM example](/assets/images/apm3.png "APM example") diff --git a/translations/spanish/sections/production/assigntransactionid.md b/translations/spanish/sections/production/assigntransactionid.md deleted file mode 100644 index 71402f692..000000000 --- a/translations/spanish/sections/production/assigntransactionid.md +++ /dev/null @@ -1,41 +0,0 @@ -# Assign ‘TransactionId’ to each log statement - -

- - -### One Paragraph Explainer - -A typical log is a warehouse of entries from all components and requests. Upon detection of some suspicious line or error, it becomes hairy to match the other lines that belong to the same specific flow (e.g. the user “John” tried to buy something). This becomes even more critical and challenging in a microservices environment, where a request/transaction might span multiple computers. Address this by assigning a unique transaction id value to all the entries of the same request, so that when one suspicious line is detected one can copy the id and search for every line with the same transaction id. However, achieving this in Node is not straightforward, since a single thread is used to serve all requests – consider using a library that can group data at the request level; see the code example below. When calling another microservice, pass the transaction id using an HTTP header like “x-transaction-id” to keep the same context. - -

- - -### Code example: assigning a transaction id to every request using continuation-local-storage - -```javascript -//when receiving a new request, start a new isolated context and set a transaction Id. The following example uses the NPM library continuation-local-storage to isolate requests - -var createNamespace = require('continuation-local-storage').createNamespace; -var session = createNamespace('my session'); -router.get('/:id', (req, res, next) => { - session.set('transactionId', 'some unique GUID'); - someService.getById(req.params.id); - logger.info('Starting now to get something by Id'); -}); -//Now any other service or component can access the contextual, per-request, data -class someService { - getById(id) { - logger.info('Starting now to get something by Id'); - //other logic comes here - } -} -//The logger can now append the transaction-id to each entry, so that entries from the same request share the same value -class logger { - info(message) { - console.log(`${message} ${session.get('transactionId')}`); - } -} -``` - -
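### Code example (illustrative sketch): propagating the transaction id to downstream services

The paragraph above suggests forwarding the id in an “x-transaction-id” HTTP header when calling other microservices. The sketch below is one hedged way to do that – the `axios` client and the header name are assumptions, not something this guide mandates; any HTTP client that lets you set headers will do.

```javascript
const axios = require('axios');
const getNamespace = require('continuation-local-storage').getNamespace;

// Reuse the namespace that was created when the request arrived
const session = getNamespace('my session');

function callOrderService(orderId) {
  // Forward the current request's transaction id so the next service
  // logs with the same correlation value
  return axios.get(`http://orders-service/orders/${orderId}`, {
    headers: { 'x-transaction-id': session.get('transactionId') }
  });
}
```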

- -### What Other Bloggers Say -From the blog [ARG! TEAM](http://blog.argteam.com/coding/hardening-node-js-for-production-part-2-using-nginx-to-avoid-node-js-load): -> ...Although express.js has built in static file handling through some connect middleware, you should never use it. *Nginx can do a much better job of handling static files and can prevent requests for non-dynamic content from clogging our node processes*... diff --git a/translations/spanish/sections/production/bestateless.md b/translations/spanish/sections/production/bestateless.md deleted file mode 100644 index 29d2bec0e..000000000 --- a/translations/spanish/sections/production/bestateless.md +++ /dev/null @@ -1,39 +0,0 @@ -# Be stateless, kill your Servers almost every day - -

- - -### One Paragraph Explainer - -Have you ever encountered a severe production issue where one server was missing some piece of configuration or data? That is probably due to an unnecessary dependency on some local asset that is not part of the deployment. Many successful products treat servers like a phoenix bird – a server dies and is reborn periodically without any damage. In other words, a server is just a piece of hardware that executes your code for some time and is then replaced. -This approach: -1. allows scaling by adding and removing servers dynamically without any side-effects -2. simplifies maintenance, as it frees our mind from tracking each server's state. - -

- - -### Code example: anti-patterns - -```javascript -//Typical mistake 1: saving uploaded files locally in a server -var multer = require('multer') //express middleware for fetching uploads -var upload = multer({ dest: 'uploads/' }) -app.post('/photos/upload', upload.array('photos', 12), function (req, res, next) {}) -//Typical mistake 2: storing authentication sessions (passport) in a local file or memory -var FileStore = require('session-file-store')(session); -app.use(session({ - store: new FileStore(options), - secret: 'keyboard cat' -})); -//Typical mistake3: storing information on the global object -Global.someCacheLike.result = {somedata} -``` - -
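### Code example (illustrative sketch): keeping state out of the process

As a hedged counterpart to the anti-patterns above, the sketch below pushes session state to an external store instead of local files or memory. The `connect-redis` store and the Redis host are assumptions used for illustration – any external session store or managed service achieves the same goal.

```javascript
const express = require('express');
const session = require('express-session');
const RedisStore = require('connect-redis')(session);

const app = express();

// Sessions now live in Redis, so any server instance can be killed
// and replaced without logging users out
app.use(session({
  store: new RedisStore({ host: 'redis.internal', port: 6379 }), // assumed Redis endpoint
  secret: 'keyboard cat',
  resave: false,
  saveUninitialized: false
}));
```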

- -### What Other Bloggers Say -From the blog [Martin Fowler](https://martinfowler.com/bliki/PhoenixServer.html): -> ...One day I had this fantasy of starting a certification service for operations. The certification assessment would consist of a colleague and I turning up at the corporate data center and setting about critical production servers with a baseball bat, a chainsaw, and a water pistol. The assessment would be based on how long it would take for the operations team to get all the applications up and running again. This may be a daft fantasy, but there’s a nugget of wisdom here. While you should forego the baseball bats, it is a good idea to virtually burn down your servers at regular intervals. A server should be like a phoenix, regularly rising from the ashes... - -

diff --git a/translations/spanish/sections/production/createmaintenanceendpoint.md b/translations/spanish/sections/production/createmaintenanceendpoint.md deleted file mode 100644 index 08ded442f..000000000 --- a/translations/spanish/sections/production/createmaintenanceendpoint.md +++ /dev/null @@ -1,35 +0,0 @@ -# Create a maintenance endpoint - -

- - -### One Paragraph Explainer - -A maintenance endpoint is a plain, secured HTTP API that is part of the app code, and its purpose is to let the ops/production team view and invoke useful functionality. For example, it can return a heap dump (memory snapshot) of the process, report whether there are memory leaks and even allow executing REPL commands directly. This endpoint is needed where the conventional devops tools (monitoring products, logs, etc.) fail to gather some specific type of information, or when you choose not to buy/install such tools. The golden rule is to use professional, external tools for monitoring and maintaining production; these are usually more robust and accurate. That said, there are likely to be cases where generic tools fail to extract information that is specific to Node or to your app – for example, should you wish to generate a memory snapshot at the moment the GC completed a cycle – a few NPM libraries will gladly perform this for you, but popular monitoring tools will likely miss this functionality - -

- - -### Code example: generating a heap dump via code - -```javascript -var heapdump = require('heapdump'); -var fs = require('fs'); - -router.get('/ops/heapdump', (req, res, next) => { - logger.info(`About to generate a heap dump`); - heapdump.writeSnapshot(function (err, filename) { - console.log('heap dump file is ready to be sent to the caller', filename); - fs.readFile(filename, "utf-8", function (err, data) { - res.end(data); - }); - }); -}); -``` - -

- -### Recommended Watch - -▶ [Getting your Node.js app production ready](http://mubaloo.com/best-practices-deploying-node-js-applications) - -![Getting your Node.js app production ready](/assets/images/createmaintenanceendpoint1.png "Getting your Node.js app production ready") diff --git a/translations/spanish/sections/production/delegatetoproxy.md b/translations/spanish/sections/production/delegatetoproxy.md deleted file mode 100644 index 587eb9221..000000000 --- a/translations/spanish/sections/production/delegatetoproxy.md +++ /dev/null @@ -1,50 +0,0 @@ -# Delegate anything possible (e.g. static content, gzip) to a reverse proxy - -

- - -### One Paragraph Explainer - -It’s very tempting to cargo-cult Express and use its rich middleware offering for networking related tasks like serving static files, gzip encoding, throttling requests, SSL termination, etc. This is a performance kill due to its single threaded model which will keep the CPU busy for long periods (Remember, Node’s execution model is optimized for short tasks or async IO related tasks). A better approach is to use a tool that expertise in networking tasks – the most popular are nginx and HAproxy which are also used by the biggest cloud vendors to lighten the incoming load on node.js processes. - -

- - -### Code Example – explanation - -```javascript -gzip on; -#defining gzip compression -gzip_comp_level 6; -gzip_vary on; -upstream myApplication { - server 127.0.0.1:3000; - server 127.0.0.1:3001; - keepalive 64; -} - -#defining web server -server { - listen 80; - listen 443 ssl; - ssl_certificate /some/location/sillyfacesociety.com.bundle.crt; - error_page 502 /errors/502.html; - #handling static content - location ~ ^/(images/|img/|javascript/|js/|css/|stylesheets/|flash/|media/|static/|robots.txt|humans.txt|favicon.ico) { - root /usr/local/silly_face_society/node/public; - access_log off; - expires max; -} -``` - -

- -### What Other Bloggers Say - -* From the blog [Mubaloo](http://mubaloo.com/best-practices-deploying-node-js-applications): -> …It’s very easy to fall into this trap – You see a package like Express and think “Awesome! Let’s get started” – you code away and you’ve got an application that does what you want. This is excellent and, to be honest, you’ve won a lot of the battle. However, you will lose the war if you upload your app to a server and have it listen on your HTTP port, because you’ve forgotten a very crucial thing: Node is not a web server. **As soon as any volume of traffic starts to hit your application, you’ll notice that things start to go wrong: connections are dropped, assets stop being served or, at the very worst, your server crashes. What you’re doing is attempting to have Node deal with all of the complicated things that a proven web server does really well. Why reinvent the wheel?** -> **This is just for one request, for one image and bearing in mind this is memory that your application could be using for important stuff like reading a database or handling complicated logic; why would you cripple your application for the sake of convenience?** - - -* From the blog [Argteam](http://blog.argteam.com/coding/hardening-node-js-for-production-part-2-using-nginx-to-avoid-node-js-load): -> Although express.js has built in static file handling through some connect middleware, you should never use it. **Nginx can do a much better job of handling static files and can prevent requests for non-dynamic content from clogging our node processes**… diff --git a/translations/spanish/sections/production/detectvulnerabilities.md b/translations/spanish/sections/production/detectvulnerabilities.md deleted file mode 100644 index 9cc850d84..000000000 --- a/translations/spanish/sections/production/detectvulnerabilities.md +++ /dev/null @@ -1,26 +0,0 @@ -# Use tools that automatically detect vulnerabilities - -

- - -### One Paragraph Explainer - -I really love the following words from a StrongLoop blog post: “The security of your app is only as strong as the weakest link in your dependencies”. Code dependencies do in fact tend to have vulnerabilities, even the most famous and battle-tested packages. For example, a threat was detected in a previous version of Express that might expose the user to a cross-site scripting attack. Luckily, community and commercial tools (all with free plans, at least for public repositories) such as nsp and snyk can keep an automatic eye on these threats and warn the team, and the latter can even patch these vulnerabilities automatically - -
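### Code example (illustrative sketch): wiring a vulnerability scan into the build

The paragraph above names nsp and snyk as scanners. Below is a hedged sketch of how such a scan is commonly wired into `package.json` scripts so it runs before the tests in CI; the script names and the `mocha` test runner are arbitrary choices for illustration, and the exact CLI flags may differ between tool versions.

```javascript
{
  "scripts": {
    "security-check": "nsp check",
    "snyk-test": "snyk test",
    "test": "npm run security-check && npm run snyk-test && mocha"
  }
}
```

Running `snyk wizard` interactively is the tool's way of suggesting (and sometimes applying) patches for the issues it finds.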

- -### What Other Bloggers Say -From the [StrongLoop](Best Practices for Express in Production): - -> ...Using to manage your application’s dependencies is powerful and convenient. But the packages that you use may contain critical security vulnerabilities that could also affect your application. The security of your app is only as strong as the “weakest link” in your dependencies. Fortunately, there are two helpful tools you can use to ensure of the third-party packages you use: and requireSafe. These two tools do largely the same thing, so using both might be overkill, but “better safe than sorry” are words to live by when it comes to security... - -

- -### Code example: hardening Express with the helmet middleware - -```javascript -//using a single line of code attaches 7 protective middleware to the Express app -app.use(helmet()); -//additional configurations can be applied on demand, this one misleads the caller into thinking we’re using PHP 🙂 -app.use(helmet.hidePoweredBy({ setTo: 'PHP 4.2.0' })); -//other middleware are not activated by default and require explicit configuration -app.use(helmet.referrerPolicy({ policy: 'same-origin' })); -``` \ No newline at end of file diff --git a/translations/spanish/sections/production/frontendout.md b/translations/spanish/sections/production/frontendout.md deleted file mode 100644 index 20af87e8e..000000000 --- a/translations/spanish/sections/production/frontendout.md +++ /dev/null @@ -1,41 +0,0 @@ -# Get your frontend assets out of Node - -

- - -### One Paragraph Explainer - -In a classic web app the backend serves the frontend/graphics to the browser, and a very common approach in the Node world is to use the Express static middleware for streaming static files to the client. BUT – Node is not a typical web server, as it utilizes a single thread that is not optimized to serve many files at once. Instead, consider using a reverse proxy, cloud storage or a CDN (e.g. Nginx, AWS S3, Azure Blob Storage, etc.) that applies many optimizations to this task and gains much better throughput. For example, specialized middleware like nginx embodies a direct hook between the file system and the network card and a multi-threaded approach to minimize contention among multiple requests. -Your optimal solution might take one of the following forms: -1. A reverse proxy – your static files will be located right next to your Node application, and only requests to the static files folder will be served by a proxy that sits in front of your Node app, such as nginx. With this approach, your Node app is responsible for deploying the static files but not for serving them. Your frontend colleagues will love this approach as it prevents cross-origin requests from the frontend. -2. Cloud storage – your static files will NOT be part of your Node app content; instead they will be uploaded to services like AWS S3, Azure Blob Storage, or other similar services that were born for this mission. With this approach, your Node app is not responsible for deploying the static files nor for serving them, hence a complete decoupling is drawn between Node and the frontend, which is anyway handled by different teams. - -

- - -### Code example: typical nginx configuration for serving static files - -```javascript -gzip on; -#defining gzip compression -keepalive 64; -}#defining web server -server { -listen 80; -listen 443 ssl;#handling static content -location ~ ^/(images/|img/|javascript/|js/|css/|stylesheets/|flash/|media/|static/|robots.txt|humans.txt|favicon.ico) { -root /usr/local/silly_face_society/node/public; -access_log off; -expires max; -} -``` - -

- -### What Other Bloggers Say -From the blog [StrongLoop](https://strongloop.com/strongblog/best-practices-for-express-in-production-part-two-performance-and-reliability/): - ->…In development, you can use [res.sendFile()](http://expressjs.com/4x/api.html#res.sendFile) to serve static files. But don’t do this in production, because this function has to read from the file system for every file request, so it will encounter significant latency and affect the overall performance of the app. Note that res.sendFile() is not implemented with the sendfile system call, which would make it far more efficient. Instead, use serve-static middleware (or something equivalent), that is optimized for serving files for Express apps. An even better option is to use a reverse proxy to serve static files; see Use a reverse proxy for more information… - -

diff --git a/translations/spanish/sections/production/guardprocess.md b/translations/spanish/sections/production/guardprocess.md deleted file mode 100644 index 523d67f6a..000000000 --- a/translations/spanish/sections/production/guardprocess.md +++ /dev/null @@ -1,19 +0,0 @@ -# Guard and restart your process upon failure (using the right tool) - -

- - -### One Paragraph Explainer - -At the base level, Node processes must be guarded and restarted upon failure. Simply put, for small apps and for those who don’t use containers – tools like [PM2](https://www.npmjs.com/package/pm2-docker) are perfect as they bring simplicity, restarting capabilities and rich integration with Node. Others with strong Linux skills might use systemd and run Node as a service. Things get more interesting for apps that use Docker or any container technology, since those are usually accompanied by cluster management tools (e.g. [AWS ECS](http://docs.aws.amazon.com/AmazonECS/latest/developerguide/Welcome.html), [Kubernetes](https://kubernetes.io/), etc.) that deploy, monitor and heal containers. Having all those rich cluster management features, including container restart, why bother with other tools like PM2? There’s no bulletproof answer. There are good reasons to keep PM2 within containers (mostly its container-specific version [pm2-docker](https://www.npmjs.com/package/pm2-docker)) as the first guarding tier – it’s much faster to restart a process, and it provides Node-specific features like flagging to the code when the hosting container asks to gracefully shut down. Others might choose to avoid the unnecessary layer. To conclude this write-up, no solution suits them all and getting to know the options is the important thing - -
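### Code example (illustrative sketch): guarding a process with PM2

The paragraph above names PM2 as one guarding option. The sketch below shows the common pattern of describing the app in an `ecosystem.config.js` file and letting PM2 restart it on crashes; the file name, app name and memory threshold are assumptions for illustration, and the exact option set depends on your PM2 version.

```javascript
// ecosystem.config.js – assumed example configuration, adjust to your app
module.exports = {
  apps: [{
    name: 'my-api',                 // hypothetical app name
    script: './server.js',          // entry point of the app
    instances: 1,
    max_memory_restart: '300M',     // restart if the process exceeds this memory
    env: { NODE_ENV: 'production' }
  }]
};

// Then, from the command line (run once on the host):
//   pm2 start ecosystem.config.js
//   pm2 startup   # generate an init script so PM2 itself survives reboots
//   pm2 save
```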

- - -### What Other Bloggers Say - -* From the [Express Production Best Practices](https://expressjs.com/en/advanced/best-practice-performance.html): -> ... In development, you started your app simply from the command line with node server.js or something similar. **But doing this in production is a recipe for disaster. If the app crashes, it will be offline** until you restart it. To ensure your app restarts if it crashes, use a process manager. A process manager is a “container” for applications that facilitates deployment, provides high availability, and enables you to manage the application at runtime. - -* From the Medium blog post [Understanding Node Clustering](https://medium.com/@CodeAndBiscuits/understanding-nodejs-clustering-in-docker-land-64ce2306afef#.cssigr5z3): -> ... Understanding NodeJS Clustering in Docker-Land “Docker containers are streamlined, lightweight virtual environments, designed to simplify processes to their bare minimum. Processes that manage and coordinate their own resources are no longer as valuable. **Instead, management stacks like Kubernetes, Mesos, and Cattle have popularized the concept that these resources should be managed infrastructure-wide**. CPU and memory resources are allocated by “schedulers”, and network resources are managed by stack-provided load balancers. \ No newline at end of file diff --git a/translations/spanish/sections/production/lockdependencies.md b/translations/spanish/sections/production/lockdependencies.md deleted file mode 100644 index 35fb62b14..000000000 --- a/translations/spanish/sections/production/lockdependencies.md +++ /dev/null @@ -1,74 +0,0 @@ -# Lock dependencies - -

- - -### One Paragraph Explainer - - - -Your code depends on many external packages; let’s say it ‘requires’ and uses momentjs 2.1.4. Then, by default, when you deploy to production NPM might fetch momentjs 2.1.5, which unfortunately brings some new bugs to the table. Using NPM config files and the setting --save-exact=true instructs NPM to refer to the *exact* same version that was installed, so the next time you run “npm install” (in production or within a Docker container you plan to ship forward for testing) the same dependent version will be fetched. An alternative popular approach is using a shrinkwrap file (easily generated using NPM) that states exactly which packages and versions should be installed, so no environment can be tempted to fetch newer versions. - -* **Update:** as of NPM 5, dependencies are locked automatically via a lock file. Yarn, an emerging package manager, also locks down dependencies by default - -

- - -### Code example: .npmrc file that instructs NPM to use exact versions - -```javascript -//save this as .npmrc file on the project directory -save-exact:true -``` - -

- -### Code example: shirnkwrap.json file that distill the exact depedency tree - -```javascript -{ - "name": "A", - "dependencies": { - "B": { - "version": "0.0.1", - "dependencies": { - "C": { - "version": "0.1.0" - } - } - } - } -} -``` - -

- -### Code example: NPM 5 dependencies lock file – package.json - -```javascript -{ - "name": "package-name", - "version": "1.0.0", - "lockfileVersion": 1, - "dependencies": { - "cacache": { - "version": "9.2.6", - "resolved": "https://registry.npmjs.org/cacache/-/cacache-9.2.6.tgz", - "integrity": "sha512-YK0Z5Np5t755edPL6gfdCeGxtU0rcW/DBhYhYVDckT+7AFkCCtedf2zru5NRbBLFk6e7Agi/RaqTOAfiaipUfg==" - }, - "duplexify": { - "version": "3.5.0", - "resolved": "https://registry.npmjs.org/duplexify/-/duplexify-3.5.0.tgz", - "integrity": "sha1-GqdzAC4VeEV+nZ1KULDMquvL1gQ=", - "dependencies": { - "end-of-stream": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.0.0.tgz", - "integrity": "sha1-1FlucCc0qT5A6a+GQxnqvZn/Lw4=" - } - } - } - } -} -``` diff --git a/translations/spanish/sections/production/measurememory.md b/translations/spanish/sections/production/measurememory.md deleted file mode 100644 index 53cd84eba..000000000 --- a/translations/spanish/sections/production/measurememory.md +++ /dev/null @@ -1,26 +0,0 @@ -# Measure and guard the memory usage - -

- - -### One Paragraph Explainer - -In a perfect world, a web developer shouldn’t have to deal with memory leaks. In reality, memory issues are a known Node gotcha one must be aware of. Above all, memory usage must be monitored constantly. In development and on small production sites you may gauge it manually using Linux commands or NPM tools and libraries like node-inspector and memwatch. The main drawback of these manual activities is that they require a human being to actively monitor – for serious production sites it’s absolutely vital to use robust monitoring tools (e.g. AWS CloudWatch, DataDog or any similar proactive system) that alert when a leak happens. There are also a few development guidelines to prevent leaks: avoid storing data at the global level, use streams for data with dynamic size, and limit variable scope using let and const. - -
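### Code example (illustrative sketch): measuring the process memory from within the app

As a hedged illustration of the manual gauging mentioned above, the snippet below samples `process.memoryUsage()` periodically and warns when the heap crosses a threshold. The interval and the 250 MB threshold are arbitrary assumptions – a real production setup would push these numbers to a monitoring system instead of the log.

```javascript
const THRESHOLD_BYTES = 250 * 1024 * 1024; // assumed alert threshold: 250 MB of used heap

setInterval(() => {
  const usage = process.memoryUsage();
  console.log(`memory rss=${usage.rss} heapTotal=${usage.heapTotal} heapUsed=${usage.heapUsed}`);

  if (usage.heapUsed > THRESHOLD_BYTES) {
    // In a real system: emit a metric or alert instead of just logging
    console.warn('Heap usage is above the configured threshold - possible leak?');
  }
}, 30 * 1000); // sample every 30 seconds
```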

- -### What Other Bloggers Say - -* From the blog [Dyntrace](http://apmblog.dynatrace.com/): -> ... ”As we already learned, in Node.js JavaScript is compiled to native code by V8. The resulting native data structures don’t have much to do with their original representation and are solely managed by V8. This means that we cannot actively allocate or deallocate memory in JavaScript. V8 uses a well-known mechanism called garbage collection to address this problem.” - -* From the blog [Dyntrace](http://blog.argteam.com/coding/hardening-node-js-for-production-part-2-using-nginx-to-avoid-node-js-load): -> ... “Although this example leads to obvious results the process is always the same: -Create heap dumps with some time and a fair amount of memory allocation in between -Compare a few dumps to find out what’s growing” - -* From the blog [Dyntrace](http://blog.argteam.com/coding/hardening-node-js-for-production-part-2-using-nginx-to-avoid-node-js-load): -> ... “fault, Node.js will try to use about 1.5GBs of memory, which has to be capped when running on systems with less memory. This is the expected behaviour as garbage collection is a very costly operation. -The solution for it was adding an extra parameter to the Node.js process: -node –max_old_space_size=400 server.js –production ” -“Why is garbage collection expensive? The V8 JavaScript engine employs a stop-the-world garbage collector mechanism. In practice, it means that the program stops execution while garbage collection is in progress.” \ No newline at end of file diff --git a/translations/spanish/sections/production/monitoring.md b/translations/spanish/sections/production/monitoring.md deleted file mode 100644 index 8ba2b3c7a..000000000 --- a/translations/spanish/sections/production/monitoring.md +++ /dev/null @@ -1,39 +0,0 @@ -# Monitoring! - -

- -### One Paragraph Explainer - -At the very basic level, monitoring means you can *easily* identify when bad things happen in production, for example by getting notified by email or Slack. The challenge is to choose the right set of tools that satisfy your requirements without breaking the bank. May I suggest starting with defining the core set of metrics that must be watched to ensure a healthy state – CPU, server RAM, Node process RAM (less than 1.4GB), the number of errors in the last minute, number of process restarts, average response time. Then go over some advanced features you might fancy and add them to your wish list. Some examples of luxury monitoring features: DB profiling, cross-service measuring (i.e. measuring a business transaction), frontend integration, exposing raw data to custom BI clients, Slack notifications and many others. -Achieving the advanced features demands a lengthy setup or buying a commercial product such as Datadog, New Relic and the like. Unfortunately, achieving even the basics is not a walk in the park, as some metrics are hardware-related (CPU) and others live within the Node process (internal errors), thus all the straightforward tools require some additional setup. For example, cloud vendor monitoring solutions (e.g. [AWS CloudWatch](https://aws.amazon.com/cloudwatch/), [Google StackDriver](https://cloud.google.com/stackdriver/)) will tell you immediately about the hardware metrics but nothing about the internal app behavior. On the other end, log-based solutions such as Elasticsearch lack the hardware view by default. The solution is to augment your choice with the missing metrics; for example, a popular choice is sending application logs to the [Elastic stack](https://www.elastic.co/products) and configuring an additional agent (e.g. [Beat](https://www.elastic.co/products)) to share hardware-related information with it to get the full picture. - - -
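### Code example (illustrative sketch): reporting an in-app metric to the monitoring system

Because hardware-level tools cannot see inside the Node process, apps often push internal counters (like errors per minute) themselves. The sketch below uses the AWS SDK's CloudWatch client purely as an illustration – the region, namespace, metric name and the choice of CloudWatch are assumptions; the same pattern applies to StatsD, the Elastic stack or any other backend.

```javascript
const AWS = require('aws-sdk');
const cloudwatch = new AWS.CloudWatch({ region: 'us-east-1' }); // assumed region

function reportError() {
  // Increment an application-level error counter that dashboards/alarms can watch
  cloudwatch.putMetricData({
    Namespace: 'MyApp',                       // hypothetical namespace
    MetricData: [{ MetricName: 'Errors', Value: 1, Unit: 'Count' }]
  }, (err) => {
    if (err) console.error('Failed to publish metric', err);
  });
}
```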

- - -### Monitoring example: AWS cloudwatch default dashboard. Hard to extract in-app metrics - -![AWS cloudwatch default dashboard. Hard to extract in-app metrics](/assets/images/monitoring1.png) - -

- -### Monitoring example: StackDriver default dashboard. Hard to extract in-app metrics - -![StackDriver default dashboard. Hard to extract in-app metrics](/assets/images/monitoring2.jpg) - -

- -### Monitoring example: Grafana as the UI layer that visualizes raw data - -![Grafana as the UI layer that visualizes raw data](/assets/images/monitoring3.png) - -

-### What Other Bloggers Say -From the blog [Rising Stack](http://mubaloo.com/best-practices-deploying-node-js-applications/): - -> …We recommend you to watch these signals for all of your services: -> Error Rate: Because errors are user facing and immediately affect your customers. -> Response time: Because the latency directly affects your customers and business. -> Throughput: The traffic helps you to understand the context of increased error rates and the latency too. -> Saturation: It tells how “full” your service is. If the CPU usage is 90%, can your system handle more traffic? … diff --git a/translations/spanish/sections/production/productoncode.md b/translations/spanish/sections/production/productoncode.md deleted file mode 100644 index 15ca7b26a..000000000 --- a/translations/spanish/sections/production/productoncode.md +++ /dev/null @@ -1,17 +0,0 @@ -# Make your code production-ready - -

- - -### One Paragraph Explainer - -Following is a list of development tips that greatly affect the production maintenance and stability: - -* The twelve-factor guide – Get familiar with the [Twelve factors](https://12factor.net/) guide -* Be stateless – Save no data locally on a specific web server (see separate bullet – ‘Be Stateless’) -* Cache – Utilize cache heavily, yet never fail because of cache mismatch -* Test memory – gauge memory usage and leaks as part your development flow, tools such as ‘memwatch’ can greatly facilitate this task -* Name functions – Minimize the usage of anonymous functions (i.e. inline callbabk) as a typical memory profiler will provide memory usage per method name -* Use CI tools – Use CI tool to detect failures before sending to production. For example, use ESLint to detect reference errors and undefined variables. Use –trace-sync-io to identify code that uses synchronous APIs (instead of the async version) -* Log wisely – Include in each log statement contextual information, hopefully in JSON format so log aggregators tools such as Elastic can search upon those properties (see separate bullet – ‘Increase visibility using smart logs’). Also, include transaction-id that identifies each request and allows to correlate lines that describe the same transaction (see separate bullet – ‘Include Transaction-ID’) -* Error management – Error handling is the Achilles’ heel of Node.JS production sites – many Node processes are crashing because of minor errors while others hang on alive in a faulty state instead of crashing. Setting your error handling strategy is absolutely critical, read here my [error handling best practices](http://goldbergyoni.com/checklist-best-practices-of-node-js-error-handling/) diff --git a/translations/spanish/sections/production/setnodeenv.md b/translations/spanish/sections/production/setnodeenv.md deleted file mode 100644 index 8cd4ad67a..000000000 --- a/translations/spanish/sections/production/setnodeenv.md +++ /dev/null @@ -1,35 +0,0 @@ -# Set NODE_ENV = production - -

- - -### One Paragraph Explainer - -Process environment variables is a set of key-value pairs made available to any running program, usually for configuration purposes. Though any variables can be used, Node encourages the convention of using a variable called NODE_ENV to flag whether we’re in production right now. This determination allows components to provide better diagnostics during development , for example by disabling caching or emitting verbose log statements. Any modern deployment tool – Chef, Puppet, CloudFormation, others – support setting environment variables during deployment - -

- - -### Code example: Setting and reading the NODE_ENV environment variable - -```javascript -//Using the command line: set the environment variables before initializing the node process -set NODE_ENV=production&& set otherVariable=someValue&& node - -//Reading the environment variable in code -if (process.env.NODE_ENV === 'production') - useCaching = true; -``` - -

- - -### What Other Bloggers Say -From the blog [dynatrace](https://www.dynatrace.com/blog/the-drastic-effects-of-omitting-node_env-in-your-express-js-applications/): -> ...In Node.js there is a convention to use a variable called NODE_ENV to set the current mode. We see that it in fact reads NODE_ENV and defaults to ‘development’ if it isn’t set. We clearly see that by setting NODE_ENV to production the number of requests Node.js can handle jumps by around two-thirds while the CPU usage even drops slightly. *Let me emphasize this: Setting NODE_ENV to production makes your application 3 times faster!* - - -![Set NODE_ENV = production](/assets/images/setnodeenv1.png "Set NODE_ENV = production") - - -

diff --git a/translations/spanish/sections/production/smartlogging.md b/translations/spanish/sections/production/smartlogging.md deleted file mode 100644 index d2a9bb7ad..000000000 --- a/translations/spanish/sections/production/smartlogging.md +++ /dev/null @@ -1,43 +0,0 @@ -# Make your app transparent using smart logs - -

- - -### One Paragraph Explainer - -Since anyway you print out log statements and obviously in a need of some interface that wraps up production information where you can trace errors and core metrics (e.g. how many errors happen every hour and which is your slowest API end-point) why not invest some moderate effort in a robust logging framework that will tick all boxes? Achieving that requires a thoughtful decision on three steps: - -**1. smart logging** – at the bare minimum you need to use a reputable logging library like [Winston](https://github.com/winstonjs/winston), [Bunyan](https://github.com/trentm/node-bunyan) and write meaningful information at each transaction start and end. Consider to also format log statements as JSON and provide all the contextual properties (e.g. user id, operation type, etc) so that the operations team can act on those fields. Include also a unique transaction ID at each log line, for more information refer to the bullet below “Write transaction-id to log”. One last point to consider is also including an agent that logs the system resource like memory and CPU like Elastic Beat. - -**2. smart aggregation** – once you have comprehensive information within your servers file system, it’s time to periodically push these to a system that aggregates, facilities and visualizes this data. The Elastic stack, for example, is a popular and free choice that offers all the components to aggregate and visualize data. Many commercial products provide similar functionality only they greatly cut down the setup time and require no hosting. - -**3. smart visualization** – now the information is aggregated and searchable, one can be satisfied only with the power of easily searching the logs but this can go much further without coding or spending much effort. We can now show important operational metrics like error rate, average CPU throughout the day, how many new users opted-in in the last hour and any other metric that helps to govern and improve our app - -
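### Code example (illustrative sketch): writing structured, contextual log entries

As a hedged illustration of step 1 above, the snippet below configures Winston (2.x-style API, matching the other examples in this guide) to emit JSON and attaches contextual fields to each entry. The field names and the file transport are assumptions; adapt them to whatever your aggregation stack expects.

```javascript
const winston = require('winston');

const logger = new winston.Logger({
  level: 'info',
  transports: [
    // JSON lines are easy for Elastic/Splunk-style tools to ingest
    new (winston.transports.File)({ filename: 'app.log', json: true })
  ]
});

// Each entry carries context the operations team can filter on
logger.info('Order submitted', {
  transactionId: 'some unique GUID',   // see the 'Assign TransactionId' bullet
  userId: 'user-123',                  // hypothetical contextual fields
  operation: 'addOrder',
  durationMs: 87
});
```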

- - -### Visualization Example: Kibana (part of Elastic stack) facilitates advanced searching on log content - -![Kibana facilitates advanced searching on log content](/assets/images/smartlogging1.png "Kibana facilitates advanced searching on log content") - -

- -### Visualization Example: Kibana (part of Elastic stack) visualizes data based on logs - -![Kibana visualizes data based on logs](/assets/images/smartlogging2.jpg "Kibana visualizes data based on logs") - -

- -### Blog Quote: Logger Requirements -From the blog [Strong Loop](https://strongloop.com/strongblog/compare-node-js-logging-winston-bunyan/): - -> Lets identify a few requirements (for a logger): -> 1. Time stamp each log line. This one is pretty self explanatory – you should be able to tell when each log entry occured. -> 2. Logging format should be easily digestible by humans as well as machines. -> 3. Allows for multiple configurable destination streams. For example, you might be writing trace logs to one file but when an error is encountered, write to the same file, then into error file and send an email at the same time… - -

- - - -

diff --git a/translations/spanish/sections/production/utilizecpu.md b/translations/spanish/sections/production/utilizecpu.md deleted file mode 100644 index 5fa6a9fec..000000000 --- a/translations/spanish/sections/production/utilizecpu.md +++ /dev/null @@ -1,27 +0,0 @@ -# Utilize all CPU cores - -

- - -### One Paragraph Explainer - -It might not come as a surprise that in its basic form, Node runs over a single thread = single process = single CPU. Paying for beefy hardware with 4 or 8 CPUs and utilizing only one sounds crazy, right? The quickest solution, which fits medium-sized apps, is using Node’s Cluster module, which in 10 lines of code spawns a process for each logical core and routes requests between the processes in a round-robin style. Even better, use PM2, which sugarcoats the clustering module with a simple interface and a cool monitoring UI. While this solution works well for traditional applications, it might fall short for applications that require top-notch performance and a robust devops flow. For those advanced use cases, consider replicating the Node process using a custom deployment script and balancing using a specialized tool such as nginx, or using a container engine such as AWS ECS or Kubernetes that has advanced features for placement and replication of processes. - -
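### Code example (illustrative sketch): spawning a worker per CPU core with the cluster module

The paragraph above mentions that Node's built-in Cluster module can do this in a few lines; here is a hedged minimal sketch of that idea. The port and the restart-on-exit policy are illustrative choices, not requirements.

```javascript
const cluster = require('cluster');
const os = require('os');
const http = require('http');

if (cluster.isMaster) {
  // Fork one worker per logical CPU core; the master distributes incoming connections
  os.cpus().forEach(() => cluster.fork());

  cluster.on('exit', (worker) => {
    console.log(`Worker ${worker.process.pid} died, starting a new one`);
    cluster.fork(); // simple self-healing policy (an assumption, adjust to taste)
  });
} else {
  http.createServer((req, res) => res.end(`Handled by pid ${process.pid}`))
      .listen(3000); // assumed port
}
```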

- - -### Comparison: Balancing using Node’s cluster vs nginx - -![Balancing using Node’s cluster vs nginx](/assets/images/utilizecpucores1.png "Balancing using Node’s cluster vs nginx") - -

- -### What Other Bloggers Say -* From the [Node.JS documentation](https://nodejs.org/api/cluster.html#cluster_how_it_works): -> ... The second approach, Node clusters, should, in theory, give the best performance. In practice however, distribution tends to be very unbalanced due to operating system scheduler vagaries. Loads have been observed where over 70% of all connections ended up in just two processes, out of a total of eight ... - -* From the blog [StrongLoop](From the blog StrongLoop): -> ... Clustering is made possible with Node’s cluster module. This enables a master process to spawn worker processes and distribute incoming connections among the workers. However, rather than using this module directly, it’s far better to use one of the many tools out there that does it for you automatically; for example node-pm or cluster-service ... - -* From the Medium post [Node.js process load balance performance: comparing cluster module, iptables and Nginx](https://medium.com/@fermads/node-js-process-load-balancing-comparing-cluster-iptables-and-nginx-6746aaf38272) -> ... Node cluster is simple to implement and configure, things are kept inside Node’s realm without depending on other software. Just remember your master process will work almost as much as your worker processes and with a little less request rate then the other solutions ... \ No newline at end of file diff --git a/translations/spanish/sections/projectstructre/breakintcomponents.md b/translations/spanish/sections/projectstructre/breakintcomponents.md deleted file mode 100644 index 65154d7d5..000000000 --- a/translations/spanish/sections/projectstructre/breakintcomponents.md +++ /dev/null @@ -1,26 +0,0 @@ -# Estructura tu solución en componentes - -

- - -### One Paragraph Explainer - -For medium-sized apps and above, monoliths are really bad - one big piece of software with many dependencies is just hard to reason about and often leads to code spaghetti. Even those smart architects who are skilled enough to tame the beast and 'modularize' it spend great mental effort on design, and each change requires carefully evaluating the impact on other dependent objects. The ultimate solution is to develop small software: divide the whole stack into self-contained components that don't share files with others, each made up of very few files (e.g. API, service, data access, test, etc.) so that it's very easy to reason about. Some may call this a 'microservices' architecture - it's important to understand that microservices are not a spec which you must follow, but rather a set of principles. You may adopt many principles into a full-blown microservices architecture or adopt only a few. Both are good as long as you keep the software complexity low. The very least you should do is create basic borders between components: assign a folder in your project root for each business component and make it self-contained - other components are allowed to consume its functionality only through its public interface or API. This is the foundation for keeping your components simple, avoiding dependency hell and paving the way to full-blown microservices in the future once your app grows - -
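### Code example (illustrative sketch): exposing a component only through its public interface

To make the 'consume only through the public interface' rule concrete, here is a hedged sketch: each component folder exports a single entry point, and other components import only that file. The folder and function names are invented for illustration.

```javascript
// components/orders/index.js – the only file other components are allowed to require
const orderService = require('./orderService'); // internal implementation, never required directly from outside

// Re-export just the public surface of the component
module.exports = {
  addOrder: orderService.addOrder,
  getOrderById: orderService.getOrderById
};

// In another component, e.g. components/users/someUserFlow.js:
//   const orders = require('../orders'); // resolves to orders/index.js, the public API
//   orders.addOrder({ ... });            // internals like orderService stay hidden
```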

- - -### Blog Quote: "Scaling requires scaling of the entire application" - From the blog MartinFowler.com - - > Monolithic applications can be successful, but increasingly people are feeling frustrations with them - especially as more applications are being deployed to the cloud . Change cycles are tied together - a change made to a small part of the application, requires the entire monolith to be rebuilt and deployed. Over time it's often hard to keep a good modular structure, making it harder to keep changes that ought to only affect one module within that module. Scaling requires scaling of the entire application rather than parts of it that require greater resource. - -

- - ### Good: Structure your solution by self-contained components -![alt text](https://github.com/i0natan/nodebestpractices/blob/master/assets/images/structurebycomponents.PNG "Structuring solution by components") - -

- -### Bad: Group your files by technical role -![alt text](https://github.com/i0natan/nodebestpractices/blob/master/assets/images/structurebyroles.PNG "Structuring solution by technical roles") diff --git a/translations/spanish/sections/projectstructre/configguide.md b/translations/spanish/sections/projectstructre/configguide.md deleted file mode 100644 index 784e96e4d..000000000 --- a/translations/spanish/sections/projectstructre/configguide.md +++ /dev/null @@ -1,34 +0,0 @@ -# Use environment aware, secure and hirearchical config - -

- - -### One Paragraph Explainer - -When dealing with configuration data, many things can just annoy and slow you down: (1) setting all the keys using process environment variables becomes very tedious when you need to inject 100 keys (instead of just committing those in a config file); however, when dealing with files only, the devops admins cannot alter the behaviour without changing the code. A reliable config solution must combine both configuration files + overrides from the process variables. (2) When specifying all keys in a flat JSON, it becomes frustrating to find and modify entries when the list grows big. A hierarchical JSON file that is grouped into sections can overcome this issue + a few config libraries allow storing the configuration in multiple files and take care of merging them at runtime. See the example below. (3) Storing sensitive information like a DB password is obviously not recommended, but no quick and handy solution exists for this challenge. Some configuration libraries allow encrypting files, others encrypt those entries during GIT commits, or simply don't store real values for those entries and specify the actual value during deployment via environment variables. (4) Some advanced config scenarios demand injecting configuration values via the command line (argv) or syncing configuration info via a centralized cache like Redis so different servers won't hold different data. -Some configuration libraries can provide most of these features for free; have a look at NPM libraries like [nconf](https://www.npmjs.com/package/nconf) and [config](https://www.npmjs.com/package/config), which tick many of these requirements. - -
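### Code example (illustrative sketch): combining files, environment variables and command-line arguments with nconf

As a hedged illustration of the 'files + overrides' idea above, the snippet below layers sources with the nconf library: command-line arguments win over environment variables, which win over a committed config file (such as the hierarchical file shown in the next example). The file path and key names are assumptions.

```javascript
const nconf = require('nconf');

// Priority order: argv > env > config file > hard-coded defaults
nconf.argv()
     .env()
     .file({ file: './config/default.json' }) // assumed path to the committed config
     .defaults({ port: 3000 });

const dbHost = nconf.get('Customer:dbConfig:host'); // ':' walks the hierarchical keys
const port = nconf.get('port');
```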

- -### Code Example – hirearchical config helps to find entries and maintain huge config files - -```javascript -{ - // Customer module configs - "Customer": { - "dbConfig": { - "host": "localhost", - "port": 5984, - "dbName": "customers" - }, - "credit": { - "initialLimit": 100, - // Set low for development - "initialDays": 1 - } - } -} -``` - -

diff --git a/translations/spanish/sections/projectstructre/createlayers.md b/translations/spanish/sections/projectstructre/createlayers.md deleted file mode 100644 index be55b3fc7..000000000 --- a/translations/spanish/sections/projectstructre/createlayers.md +++ /dev/null @@ -1,11 +0,0 @@ -# Layer your app, keep Express within its boundaries - -

- - ### Separate component code into layers: web, services and DAL -![alt text](https://github.com/i0natan/nodebestpractices/blob/master/assets/images/structurebycomponents.PNG "Separate component code into layers") - -
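### Code example (illustrative sketch): keeping Express objects out of the service layer

As a hedged sketch of the layering shown above, the web layer below unpacks the Express `req` object and passes plain values downward, so the service and data-access code never depends on Express. All names are invented for illustration, and `req.user` assumes some authentication middleware ran earlier.

```javascript
const express = require('express');
const router = express.Router();

// --- service layer (would live in the component's service file) - no Express objects here ---
const orderService = {
  addOrder(userId, orderDetails) {
    // business rules here; would call the DAL and return a plain object
    return Promise.resolve(Object.assign({ id: 1, userId: userId }, orderDetails));
  }
};

// --- web layer - the only place aware of req/res ---
router.post('/orders', (req, res, next) => {
  orderService.addOrder(req.user.id, req.body)   // pass plain values downward, not req/res
    .then((order) => res.status(201).json(order))
    .catch(next);
});

module.exports = router;
```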

- -### 1 min explainer: The downside of mixing layers -![alt text](https://github.com/i0natan/nodebestpractices/blob/master/assets/images/keepexpressinweb.gif "The downside of mixing layers") diff --git a/translations/spanish/sections/projectstructre/separateexpress.md b/translations/spanish/sections/projectstructre/separateexpress.md deleted file mode 100644 index 389a7aa14..000000000 --- a/translations/spanish/sections/projectstructre/separateexpress.md +++ /dev/null @@ -1,47 +0,0 @@ -# Separate Express 'app' and 'server' - -

- - -### One Paragraph Explainer - -The latest Express generator comes with a great practice that is worth to keep - the API declaration is separated from the network related configuration (port, protocol, etc). This allows testing the API in-process, without performing network calls, with all the benefits that it brings to the table: fast testing execution and getting coverage metrics of the code. It also allows deploying the same API under flexible and different network conditions. Bonus: better separation of concerns and cleaner code - -

- -### Code example: API declaration, should reside in app.js - -```javascript -var app = express(); -app.use(bodyParser.json()); -app.use("/api/events", events.API); -app.use("/api/forms", forms); - -``` - -

- -### Code example: Server network declaration, should reside in /bin/www - -```javascript -var app = require('../app'); -var http = require('http'); - -/** - * Get port from environment and store in Express. - */ - -var port = normalizePort(process.env.PORT || '3000'); -app.set('port', port); - -/** - * Create HTTP server. - */ - -var server = http.createServer(app); - -``` - - -### Example: test your API in-process using supertest (popular testing package) -![alt text](https://github.com/i0natan/nodebestpractices/blob/master/assets/images/supertestinprocess.PNG "In process testing with Supertest") diff --git a/translations/spanish/sections/projectstructre/thincomponents.md b/translations/spanish/sections/projectstructre/thincomponents.md deleted file mode 100644 index 8b61a0438..000000000 --- a/translations/spanish/sections/projectstructre/thincomponents.md +++ /dev/null @@ -1,26 +0,0 @@ -# Structure your solution by components - -

- - -### One Paragraph Explainer - -For medium-sized apps and above, monoliths are really bad - one big piece of software with many dependencies is just hard to reason about and often leads to code spaghetti. Even those smart architects who are skilled enough to tame the beast and 'modularize' it spend great mental effort on design, and each change requires carefully evaluating the impact on other dependent objects. The ultimate solution is to develop small software: divide the whole stack into self-contained components that don't share files with others, each made up of very few files (e.g. API, service, data access, test, etc.) so that it's very easy to reason about. Some may call this a 'microservices' architecture - it's important to understand that microservices are not a spec which you must follow, but rather a set of principles. You may adopt many principles into a full-blown microservices architecture or adopt only a few. Both are good as long as you keep the software complexity low. The very least you should do is create basic borders between components: assign a folder in your project root for each business component and make it self-contained - other components are allowed to consume its functionality only through its public interface or API. This is the foundation for keeping your components simple, avoiding dependency hell and paving the way to full-blown microservices in the future once your app grows - -

- - -### Blog Quote: "Scaling requires scaling of the entire application" - From the blog MartinFowler.com - - > Monolithic applications can be successful, but increasingly people are feeling frustrations with them - especially as more applications are being deployed to the cloud . Change cycles are tied together - a change made to a small part of the application, requires the entire monolith to be rebuilt and deployed. Over time it's often hard to keep a good modular structure, making it harder to keep changes that ought to only affect one module within that module. Scaling requires scaling of the entire application rather than parts of it that require greater resource. - -

- - ### Good: Structure your solution by self-contained components -![alt text](https://github.com/i0natan/nodebestpractices/blob/master/assets/images/structurebycomponents.PNG "Structuring solution by components") - -

- -### Bad: Group your files by technical role -![alt text](https://github.com/i0natan/nodebestpractices/blob/master/assets/images/structurebyroles.PNG "Structuring solution by technical roles") diff --git a/translations/spanish/sections/projectstructre/wraputilities.md b/translations/spanish/sections/projectstructre/wraputilities.md deleted file mode 100644 index 6d47e6050..000000000 --- a/translations/spanish/sections/projectstructre/wraputilities.md +++ /dev/null @@ -1,14 +0,0 @@ -# Wrap common utilities as NPM packages - -

- - -### One Paragraph Explainer -Once you start growing and have different components on different servers which consumes similar utilities, you should start managing the dependencies - how can you keep 1 copy of your utility code and let multiple consumer components use and deploy it? well, there is a framework for that, it's called NPM... Start by wrapping 3rd party utility packages with your own code to make it easily replaceable in the future and publish your own code as private NPM package. Now, all your code base can import that code and benefit free dependency management framework. It's possible to publish NPM packages for your own private use without sharing it publicly using [private modules](https://docs.npmjs.com/private-modules/intro), [private registry](https://npme.npmjs.com/docs/tutorials/npm-enterprise-with-nexus.html) or [local NPM packages](https://medium.com/@arnaudrinquin/build-modular-application-with-npm-local-modules-dfc5ff047bcc) - - -

- - - ### Sharing your own common utilities across environments and components -![alt text](https://github.com/i0natan/nodebestpractices/blob/master/assets/images/Privatenpm.png "Structuring solution by components") diff --git a/translations/spanish/sections/template.md b/translations/spanish/sections/template.md deleted file mode 100644 index 22ca4a553..000000000 --- a/translations/spanish/sections/template.md +++ /dev/null @@ -1,40 +0,0 @@ -# Title here - -

- - -### One Paragraph Explainer - -Text - -

- - -### Code Example – explanation - -```javascript -code here -``` - -

- -### Code Example – another - -```javascript -code here -``` - -

- -### Blog Quote: "Title" - From the blog pouchdb.com, ranked 11 for the keywords “Node Promises” - - > …text here - -

- - ### Image title -![alt text](https://github.com/i0natan/nodebestpractices/blob/master/assets/images/swaggerDoc.png "API error handling") - - -

diff --git a/translations/spanish/sections/testingandquality/bumpversion.md b/translations/spanish/sections/testingandquality/bumpversion.md deleted file mode 100644 index 29216b336..000000000 --- a/translations/spanish/sections/testingandquality/bumpversion.md +++ /dev/null @@ -1,30 +0,0 @@ -# Title here - - -### One Paragraph Explainer - -Text - - -### Code Example – explanation - -```javascript -code here -``` - -### Code Example – another - -```javascript -code here -``` - -### Blog Quote: "Title" - From the blog pouchdb.com, ranked 11 for the keywords “Node Promises” - - > …text here - - ### Image title -![alt text](https://github.com/i0natan/nodebestpractices/blob/master/assets/images/swaggerDoc.png "API error handling") - - - From e858ad2ff4f4eca7f6c0d51ad5229c22c644c9d0 Mon Sep 17 00:00:00 2001 From: Eduardo Montalvo Date: Mon, 8 Jan 2018 21:54:34 -0600 Subject: [PATCH 6/7] Fix markdown errors in spanish translate --- README.md | 42 +++++++++++++++++++++--------------------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/README.md b/README.md index 20c4d4fc1..54c5014e8 100644 --- a/README.md +++ b/README.md @@ -85,7 +85,7 @@ **De lo contrario:** No cumplir con ninguno de los requisitos de configuración simplemente frena al equipo de desarrollo o al equipo de devpos. Probablemente ambos -🔗 [**Leer más: buenas prácticas de configuración **](/sections/projectstructre/configguide.md) +🔗 [**Leer más: buenas prácticas de configuración**](/sections/projectstructre/configguide.md)


@@ -100,7 +100,7 @@ **De lo contrario:** El estilo de callback de Node.JS, function (err, response), es una forma prometedora de código no mantenible debido a la combinación de manejo de errores con código accidentado, anidación excesiva y patrones de codificación incómodos -🔗 [** Leer más: evitar callbacks **] (/sections/errorhandling/asyncerrorhandling.md) +🔗 [**Leer más: evitar callbacks**](/sections/errorhandling/asyncerrorhandling.md)
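
A modo de ilustración, un esbozo mínimo con async/await (las funciones `obtenerUsuario` y `obtenerPedidos` son hipotéticas, definidas aquí solo como stubs):

```javascript
// funciones hipotéticas que devuelven promesas, solo para ilustrar
const obtenerUsuario = (id) => Promise.resolve({ id, nombre: 'Ada' });
const obtenerPedidos = (usuario) => Promise.resolve([{ producto: 'libro' }]);

// con async/await el flujo es lineal y los errores se capturan en un solo try/catch,
// sin anidar callbacks del estilo function (err, response)
async function mostrarPedidos(id) {
  try {
    const usuario = await obtenerUsuario(id);
    const pedidos = await obtenerPedidos(usuario);
    console.log(pedidos);
  } catch (error) {
    console.error('La consulta falló:', error);
  }
}

mostrarPedidos(1);
```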

@@ -111,97 +111,97 @@ **De lo contrario:** Al invocar algún componente, no estar seguro de qué tipo de errores son retornados, hace que sea mucho más difícil manejar los errores de forma adecuada. Peor aún, el uso de tipos personalizados para describir los errores puede conducir a la pérdida de información de error crítico como el seguimiento de la pila. -🔗 [** Leer más: utilizando el objeto de Error incorporado **](/sections/errorhandling/useonlythebuiltinerror.md) +🔗 [**Leer más: utilizando el objeto de Error incorporado**](/sections/errorhandling/useonlythebuiltinerror.md)
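
Un esbozo mínimo de la idea: lanza siempre instancias del objeto Error incorporado (o de una clase que herede de él) para conservar el mensaje y la pila de llamadas:

```javascript
// mal: lanzar un string pierde el stack trace y rompe las comprobaciones instanceof
// throw 'No se encontró el producto';

// bien: usar el objeto Error incorporado
function buscarProducto(id) {
  if (!id) {
    throw new Error('Falta el id del producto'); // conserva message y stack
  }
  return { id };
}

try {
  buscarProducto(null);
} catch (error) {
  console.error(error.message, error.stack);
}
```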

-##! [✔] 2.3 Distinguir errores operacionales contra errores del programador +## ![✔] 2.3 Distinguir errores operacionales contra errores del programador **TL;DR:** Los errores operacionales (por ejemplo, el API recibió una entrada no válida) se refieren a casos conocidos en los que el impacto del error se entiende completamente y se pueden manejar con cuidado. Por otro lado, el error del programador (por ejemplo, tratar de leer la variable no definida) se refiere a fallas desconocidas del código que ordenan reiniciar correctamente la aplicación **De lo contrario:** Siempre puedes reiniciar la aplicación cuando aparece un error, pero ¿por qué dejar ~5000 usuarios en línea abajo debido a un error operacional menor, previsto? lo contrario tampoco es ideal: mantener la aplicación activa cuando se produce un problema desconocido (error del programador) puede provocar un comportamiento imprevisto. La diferenciación de los dos permite actuar con tacto y aplicar un enfoque equilibrado basado en el contexto dado -  🔗 [** Leer más: error operacional vs programador **](/sections/errorhandling/operationalvsprogrammererror.md) +  🔗 [**Leer más: error operacional vs programador**](/sections/errorhandling/operationalvsprogrammererror.md)
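
A modo de ejemplo, un esbozo mínimo de la distinción (la clase `ErrorAplicacion` y la propiedad `esOperacional` son nombres ilustrativos):

```javascript
// un error "marcado" como operacional puede manejarse sin reiniciar el proceso
class ErrorAplicacion extends Error {
  constructor(mensaje, esOperacional) {
    super(mensaje);
    this.esOperacional = esOperacional;
  }
}

try {
  // error operacional: entrada inválida conocida y entendida
  throw new ErrorAplicacion('El formulario recibió una entrada no válida', true);
} catch (error) {
  if (error.esOperacional) {
    console.warn(error.message); // se responde al usuario y la aplicación sigue
  } else {
    process.exit(1); // error de programador: mejor reiniciar con un estado limpio
  }
}
```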

-##! [✔] 2.4 Manejar los errores centralmente, no dentro de un middleware Express +## ![✔] 2.4 Manejar los errores centralmente, no dentro de un middleware Express **TL;DR:** La lógica de manejo de errores, como un correo al administrador y registro de logs, debe encapsularse en un objeto dedicado y centralizado al que todos los end-points (por ejemplo, Express middleware, cron jobs, unit-testing) llaman cuando se produce un error . **De lo contrario:** No manejar los errores dentro de un solo lugar dará lugar a la duplicación del código y, probablemente, a los errores que se manejan de forma incorrecta -🔗 [** Leer más: manejo de errores en un lugar centralizado **](/sections/errorhandling/centralizedhandling.md) +🔗 [**Leer más: manejo de errores en un lugar centralizado**](/sections/errorhandling/centralizedhandling.md)
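
Un esbozo mínimo de la idea (el módulo `manejadorDeErrores` es un nombre hipotético): todos los puntos de entrada delegan en el mismo objeto:

```javascript
// manejadorDeErrores.js: un único lugar para registrar, notificar y decidir si salir
const manejadorDeErrores = {
  manejarError(error) {
    console.error(error); // aquí irían el logger maduro y el correo al administrador
    if (!error.esOperacional) {
      process.exit(1);
    }
  }
};

module.exports = manejadorDeErrores;

// en Express, el middleware de errores solo delega, no contiene lógica propia:
// app.use((err, req, res, next) => {
//   manejadorDeErrores.manejarError(err);
//   res.status(500).end();
// });
```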

-##! [✔] 2.5 Errores del API Document con Swagger +## ![✔] 2.5 Documenta los errores del API con Swagger **TL;DR:** Deja que los clientes de tu API sepan qué errores podrían presentarse como respuesta para que puedan manejarlos cuidadosamente sin fallar. Esto se hace generalmente con frameworks de documentación REST API como Swagger **De lo contrario:** Un cliente del API podría decidir bloquearse y reiniciarse solo porque recibió un error que no pudo entender. Nota: la persona que llama a tu API puedes ser tú mismo (muy típico en un entorno de microservicios) -🔗 [** Leer más: documentación de errores en Swagger **](/sections/errorhandling/documentingusingswagger.md) +🔗 [**Leer más: documentación de errores en Swagger**](/sections/errorhandling/documentingusingswagger.md)

-##! [✔] 2.6 Cerrar el proceso elegantemento cuando un extraño llega +## ![✔] 2.6 Cerrar el proceso elegantemente cuando un extraño llega **TL;DR:** Cuando se produce un error desconocido (un error del desarrollador, consulta la práctica recomendada número #3): existe incertidumbre acerca del estado de la aplicación. Una práctica común sugiere reiniciar el proceso cuidadosamente usando una herramienta 'reiniciadora' como Forever y PM2. **De lo contrario:** Cuando se detecta una excepción desconocida, algunos objetos pueden estar en un estado defectuoso (por ejemplo, un emisor de eventos que se usa globalmente y que ya no se activa debido a fallas internas) y todas las solicitudes futuras pueden fallar o comportarse de manera loca -🔗 [** Leer más: cerrar el proceso **] (/sections/errorhandling/shuttingtheprocess.md) +🔗 [**Leer más: cerrar el proceso**](/sections/errorhandling/shuttingtheprocess.md)
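
Por ejemplo, un esbozo mínimo: registrar el error desconocido y salir, dejando que el 'reiniciador' (Forever, PM2) levante un proceso limpio:

```javascript
process.on('uncaughtException', (error) => {
  // el estado de la aplicación ya no es confiable: registrar y salir
  console.error('Excepción no controlada, cerrando el proceso', error);
  process.exit(1); // Forever o PM2 se encargan de reiniciar la aplicación
});
```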

-##! [✔] 2.7 Usa un logger maduro para aumentar la visibilidad de los errores +## ![✔] 2.7 Usa un logger maduro para aumentar la visibilidad de los errores **TL;DR:** Un conjunto de herramientas de registro maduras como Winston, Bunyan o Log4J acelerará el descubrimiento y la comprensión de errores. Así que olvídate de console.log. **De lo contrario:** Navegando a través de console.logs o manualmente a través de un archivo de texto desordenado sin consultar herramientas o un lector de registro decente puede mantenerte ocupado en el trabajo hasta tarde -🔗 [** Leer más: utilizando un registrador maduro **] (/sections/errorhandling/usematurelogger.md) +🔗 [**Leer más: utilizando un registrador maduro**](/sections/errorhandling/usematurelogger.md)
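
Un esbozo mínimo con Winston (la sintaxis corresponde a winston 2.x; la configuración exacta depende de la versión que uses):

```javascript
const winston = require('winston');

// logger con salida a consola y a archivo, con niveles y metadatos consultables
const logger = new winston.Logger({
  transports: [
    new winston.transports.Console({ level: 'info' }),
    new winston.transports.File({ filename: 'errores.log', level: 'error' })
  ]
});

logger.error('No se pudo conectar a la base de datos', { reintentos: 3 });
```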

-##! [✔] 2.8 Flujos de errores de prueba usando su test framework favorito +## ![✔] 2.8 Prueba los flujos de error usando tu framework de pruebas favorito **TL;DR:** Ya se trate de QA automatizado profesional o de simples pruebas manuales del desarrollador: asegúrate de que tu código no solo satisfaga un escenario positivo sino que también maneje y devuelva los errores correctos. Frameworks de prueba como Mocha & Chai pueden manejar esto fácilmente (ve ejemplos de código dentro del "Gist emergente") **De lo contrario:** Sin pruebas, ya sea automática o manualmente, no puedes confiar en tu código para devolver los errores correctos. Sin errores significativos, no hay manejo de errores -🔗 [** Leer más: probar los flujos de error **] (/sections/errorhandling/testingerrorflows.md) +🔗 [**Leer más: probar los flujos de error**](/sections/errorhandling/testingerrorflows.md)
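
A modo de ejemplo, un test con Mocha y Chai que verifica que se lanza el error correcto (la función `agregarProducto` es hipotética):

```javascript
const { expect } = require('chai');

// función hipotética bajo prueba
function agregarProducto(nombre) {
  if (!nombre) throw new Error('entrada inválida');
  return true;
}

describe('agregarProducto', () => {
  it('lanza "entrada inválida" cuando falta el nombre', () => {
    expect(() => agregarProducto(null)).to.throw('entrada inválida');
  });
});
```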

-##! [✔] 2.9 Descubre errores y tiempo de inactividad usando productos APM +## ![✔] 2.9 Descubre errores y tiempo de inactividad usando productos APM **TL;DR:** Los productos de monitoreo y rendimiento (a.k.a APM) miden de forma proactiva tu base de código o API para auto-mágicamente resaltar errores, bloqueos y ralentizar automáticamente partes que echas en falta. **De lo contrario:** Es posible que dediques un gran esfuerzo a medir el rendimiento y los tiempos de inactividad de la API, probablemente nunca sabrás cuáles son las piezas de código más lentas en el escenario del mundo real y cómo afectan estas a la experiencia del usuario. -🔗 [** Leer más: utilizando productos APM **] (/sections/errorhandling/apmproducts.md) +🔗 [**Leer más: utilizando productos APM**](/sections/errorhandling/apmproducts.md)

-##! [✔] 2.10 Captura rechazos de promesas no controladas +## ![✔] 2.10 Captura rechazos de promesas no controladas **TL;DR:** Cualquier excepción lanzada dentro de una promesa será tragada y descartada a menos que el desarrollador la maneje de manera explícita. ¡Incluso si tu código está suscrito a process.uncaughtException! Supera esto suscribiéndote al evento process.unhandledRejection. **De lo contrario:** Tus errores serán tragados y no dejarán rastros. Nada de qué preocuparse -🔗 [** Leer más: captura rechazos de promesas no controladas **] (/sections/errorhandling/catchunhandledpromiserejection.md) +🔗 [**Leer más: captura rechazos de promesas no controladas**](/sections/errorhandling/catchunhandledpromiserejection.md)
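
Un esbozo mínimo: suscribirse al evento para que ningún rechazo quede sin rastro:

```javascript
process.on('unhandledRejection', (razon) => {
  // sin esta suscripción el error se "traga" y desaparece sin dejar rastro
  console.error('Rechazo de promesa no controlado:', razon);
});

// ejemplo: esta promesa rechazada y no manejada ahora sí queda registrada
Promise.reject(new Error('algo salió mal'));
```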

-##! [✔] 2.11 Falla rápidamente, valida argumentos usando una biblioteca dedicada +## ![✔] 2.11 Falla rápidamente, valida argumentos usando una biblioteca dedicada **TL;DR:** Esto debería ser parte de tus mejores prácticas para Express - API de Assert para evitar errores desagradables que son mucho más difíciles de seguir más adelante. El código de validación suele ser tedioso a menos que se utilicen bibliotecas muy interesantes como Joi **De lo contrario:** Considera esto: tu función espera un argumento numérico "Descuento" que la persona que llama olvida pasar, más adelante tu código comprueba si Descuento != 0 (la cantidad de descuento permitida es mayor que cero), entonces permitirás que el usuario disfrute de un descuento. Dios mío, qué desagradable error. ¿Puedes verlo? -🔗 [** Leer más: falla rapidamente **] (/sections/errorhandling/failfast.md) +🔗 [**Leer más: falla rápidamente**](/sections/errorhandling/failfast.md)
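
Un esbozo mínimo con Joi (se muestra la API `Joi.validate` de las versiones de la época; en versiones recientes se usa `schema.validate`):

```javascript
const Joi = require('joi');

const esquema = Joi.object().keys({
  descuento: Joi.number().min(0).max(100).required()
});

function aplicarDescuento(datos) {
  const { error } = Joi.validate(datos, esquema); // Joi < 16
  if (error) {
    throw new Error('Entrada inválida: ' + error.message); // falla rápido, en la frontera
  }
  // a partir de aquí "descuento" existe y está dentro del rango permitido
}

aplicarDescuento({ descuento: 10 });
```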


From 541500b2e996ee17e595bcc7aa831520c0c1ad6b Mon Sep 17 00:00:00 2001 From: Eduardo Montalvo Date: Tue, 9 Jan 2018 13:24:49 -0600 Subject: [PATCH 7/7] Finished the spanish translation for project structure section --- .../projectstructre/breakintcomponents.md | 9 ++++++- sections/projectstructre/separateexpress.md | 2 +- sections/projectstructre/thincomponents.md | 24 ++++++++++++------- sections/projectstructre/wraputilities.md | 2 +- 4 files changed, 25 insertions(+), 12 deletions(-) diff --git a/sections/projectstructre/breakintcomponents.md b/sections/projectstructre/breakintcomponents.md index 7b128d2c0..e9b686e91 100644 --- a/sections/projectstructre/breakintcomponents.md +++ b/sections/projectstructre/breakintcomponents.md @@ -9,12 +9,19 @@ Para aplicaciones medianas y superiores, los monolitos son realmente malos: un s

-### Cita de Blog: "El escalado requiere escalar toda la aplicación" +### Cita de Blog: "Scaling requires scaling of the entire application" Del blog MartinFowler.com > Las aplicaciones monolíticas pueden ser exitosas, pero cada vez más personas sienten frustraciones con ellas, especialmente a medida que se implementan más aplicaciones en la nube. Los ciclos de cambio están unidos: un cambio realizado en una pequeña parte de la aplicación requiere que se reconstruya y despliegue todo el monolito. Con el tiempo, a menudo es difícil mantener una buena estructura modular, lo que hace más difícil mantener los cambios que solo deberían afectar a un módulo dentro de ese módulo. El escalado requiere escalar toda la aplicación en lugar de partes de ella que requieren un mayor recurso.

+ + ### Cita del blog: "So what does the architecture of your application scream?" + +Del blog uncle-bob + +> ... si estuvieras mirando la arquitectura de una biblioteca, es probable que veas una gran entrada, un área para salida/entrada de empleados, áreas de lectura, salas de conferencias pequeñas y galería tras galería capaz de sostener estanterías para todos los libros en la biblioteca. Esa arquitectura gritaría: Biblioteca. +Entonces, ¿qué grita la arquitectura de tu aplicación? Cuando observas la estructura de directorios de nivel superior y los archivos de origen en el paquete de nivel superior; ¿gritan: Sistema de Cuidado de la Salud, o Sistema de Contabilidad, o Sistema de Gestión de Inventario? ¿O gritan: Rails o Spring / Hibernate o ASP? ### Bien: Estructura tu solución en componentes autónomos ![alt text](https://github.com/i0natan/nodebestpractices/blob/master/assets/images/structurebycomponents.PNG "Structuring solution by components") diff --git a/sections/projectstructre/separateexpress.md b/sections/projectstructre/separateexpress.md index fd82eb831..2ed062c2d 100644 --- a/sections/projectstructre/separateexpress.md +++ b/sections/projectstructre/separateexpress.md @@ -37,5 +37,5 @@ app.set('port', port); var server = http.createServer(app); ``` -### Ejemplo: prueba tu API en proceso usanso supertest (paquete de testing popular) +### Ejemplo: prueba tu API en proceso usando supertest (paquete de testing popular) ![alt text](https://github.com/i0natan/nodebestpractices/blob/master/assets/images/supertestinprocess.PNG "In process testing with Supertest") diff --git a/sections/projectstructre/thincomponents.md b/sections/projectstructre/thincomponents.md index 8b61a0438..e9b686e91 100644 --- a/sections/projectstructre/thincomponents.md +++ b/sections/projectstructre/thincomponents.md @@ -1,26 +1,32 @@ -# Structure your solution by components +# Estructura tu solución en componentes

+### Un párrafo explicativo -### One Paragraph Explainer - -For medium sized apps and above, monoliths are really bad - a one big software with many dependencies is just hard to reason about and often lead to code spaghetti. Even those smart architects who are skilled to tame the beast and 'modulurize' it - spend great mental effort on design and each change requires to carefully evaluate the impact on other dependant objects. The ultimate solution is to develop small software: divide the whole stack into self-contained components that don't share files with others, each constitute very few files (e.g. API, service, data access, test, etc) so that it's very easy to reason about it. Some may call this 'microservices' architecture - it's important to understand that microservices is not a spec which you must follow rather a set of principles. You may adopt many principles into a full-blown microservices architecture or adopt only few. Both are good as long as you keep the software complexity low. The very least you should do is create a basic borders between components, assign a folder in your project root for each business component and make it self contained - other components are allowed to consumeits functionality only through its public interface or API. This is the foundation for keeping your components simple, avoid dependencies hell and pave the way to full-blown microservices in the future once your app grows +Para aplicaciones medianas y superiores, los monolitos son realmente malos: un software grande con muchas dependencias es simplemente difícil de entender y a menudo conduce a código espagueti. Incluso aquellos arquitectos inteligentes que están capacitados para domesticar a la bestia y "modularizarla": dedican un gran esfuerzo mental al diseño y cada cambio requiere evaluar cuidadosamente el impacto en otros objetos dependientes. La solución definitiva es desarrollar software pequeño: divide el stack completo en componentes independientes que no compartan archivos con otros, cada componente constituye muy pocos archivos (por ejemplo, API, servicio, acceso a datos, test, etc.) para que sea muy fácil de entender. Algunos pueden llamar a esto 'arquitectura de microservicios': es importante entender que los microservicios no son una especificación que debas seguir sino un conjunto de principios. Puedes adoptar muchos principios en una arquitectura de microservicios en toda regla o adoptar solo unos pocos. Ambos son buenos siempre y cuando mantengas baja la complejidad del software. Lo mínimo que debes hacer es crear una frontera básica entre los componentes, asignar una carpeta en la raíz del proyecto para cada componente de negocio y hacerlo autónomo: otros componentes pueden consumir su funcionalidad solo a través de su interfaz pública o API. Esta es la base para mantener tus componentes simples, evitar el infierno de dependencias y allanar el camino a los microservicios en el futuro una vez que tu aplicación crezca
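
A modo de ilustración, un esbozo de cómo un componente expone solo su interfaz pública (los nombres de carpetas y funciones son hipotéticos):

```javascript
// pedidos/index.js: única puerta de entrada del componente;
// sus módulos internos (servicio, acceso a datos, validación) viven en esta misma carpeta
function crearPedido(pedido) {
  return { id: Date.now(), ...pedido };
}

module.exports = { crearPedido };

// otro componente solo debería hacer:
// const pedidos = require('../pedidos');
// pedidos.crearPedido({ producto: 'libro' });
// y nunca requerir archivos internos como '../pedidos/data-access/pedidos-dal'
```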

-### Blog Quote: "Scaling requires scaling of the entire application" - From the blog MartinFowler.com +### Cita de Blog: "Scaling requires scaling of the entire application" + Del blog MartinFowler.com - > Monolithic applications can be successful, but increasingly people are feeling frustrations with them - especially as more applications are being deployed to the cloud . Change cycles are tied together - a change made to a small part of the application, requires the entire monolith to be rebuilt and deployed. Over time it's often hard to keep a good modular structure, making it harder to keep changes that ought to only affect one module within that module. Scaling requires scaling of the entire application rather than parts of it that require greater resource. + > Las aplicaciones monolíticas pueden ser exitosas, pero cada vez más personas sienten frustraciones con ellas, especialmente a medida que se implementan más aplicaciones en la nube. Los ciclos de cambio están unidos: un cambio realizado en una pequeña parte de la aplicación requiere que se reconstruya y despliegue todo el monolito. Con el tiempo, a menudo es difícil mantener una buena estructura modular, lo que hace más difícil mantener los cambios que solo deberían afectar a un módulo dentro de ese módulo. El escalado requiere escalar toda la aplicación en lugar de partes de ella que requieren un mayor recurso.

+ + ### Cita del blog: "So what does the architecture of your application scream?" + +Del blog uncle-bob + +> ... si estuvieras mirando la arquitectura de una biblioteca, es probable que veas una gran entrada, un área para salida/entrada de empleados, áreas de lectura, salas de conferencias pequeñas y galería tras galería capaz de sostener estanterías para todos los libros en la biblioteca. Esa arquitectura gritaría: Biblioteca. +Entonces, ¿qué grita la arquitectura de tu aplicación? Cuando observas la estructura de directorios de nivel superior y los archivos de origen en el paquete de nivel superior; ¿gritan: Sistema de Cuidado de la Salud, o Sistema de Contabilidad, o Sistema de Gestión de Inventario? ¿O gritan: Rails o Spring / Hibernate o ASP? - ### Good: Structure your solution by self-contained components + ### Bien: Estructura tu solución en componentes autónomos ![alt text](https://github.com/i0natan/nodebestpractices/blob/master/assets/images/structurebycomponents.PNG "Structuring solution by components")

-### Bad: Group your files by technical role +### Mal: Agrupa tus archivos por rol técnico ![alt text](https://github.com/i0natan/nodebestpractices/blob/master/assets/images/structurebyroles.PNG "Structuring solution by technical roles") diff --git a/sections/projectstructre/wraputilities.md b/sections/projectstructre/wraputilities.md index 7e4207b7c..c7f83ad91 100644 --- a/sections/projectstructre/wraputilities.md +++ b/sections/projectstructre/wraputilities.md @@ -3,7 +3,7 @@

### Un párrafo explicativo -Una vez que comienzas a crecer y tienes diferentes componentes en diferentes servidores que consumen utilidades similares, debes comenzar a administrar las dependencias: ¿cómo puedes conservar 1 copia de tu código de utilidad y permitir que múltiples componentes de consumo lo usen y lo implementen? bueno, hay un marco para eso, se llama NPM ... Comienza envolviendo paquetes de utilidad de terceros con tu propio código para que sea fácilmente reemplazable en el futuro y publiqca tu propio código como paquete privado de NPM. Ahora, toda su base de código puede importar ese código y beneficiarse del framework de gestión de dependencias gratuito. Es posible publicar paquetes de NPM para tu propio uso privado sin compartirlo públicamente utilizando [módulos privados] (https://docs.npmjs.com/private-modules/intro), [registro privado] (https: //npme.npmjs .com / docs / tutorials / npm-enterprise-with-nexus.html) o [paquetes locales de NPM] (https://medium.com/@arnaudrinquin/build-modular-application-with-npm-local-modules-dfc5ff047bcc ) +Una vez que comienzas a crecer y tienes diferentes componentes en diferentes servidores que consumen utilidades similares, debes comenzar a administrar las dependencias: ¿cómo puedes conservar 1 copia de tu código de utilidad y permitir que múltiples componentes de consumo lo usen y lo implementen? bueno, hay un marco para eso, se llama NPM ... Comienza envolviendo paquetes de utilidad de terceros con tu propio código para que sea fácilmente reemplazable en el futuro y publica tu propio código como paquete privado de NPM. Ahora, toda tu base de código puede importar ese código y beneficiarse del framework de gestión de dependencias gratuito. Es posible publicar paquetes de NPM para tu propio uso privado sin compartirlo públicamente utilizando [módulos privados](https://docs.npmjs.com/private-modules/intro), [registro privado](https://npme.npmjs.com/docs/tutorials/npm-enterprise-with-nexus.html) o [paquetes locales de NPM](https://medium.com/@arnaudrinquin/build-modular-application-with-npm-local-modules-dfc5ff047bcc)
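
Un esbozo mínimo de la idea (el nombre de paquete `@mi-org/logger` es hipotético): envolver la utilidad de terceros en un módulo propio y consumirla como una dependencia más:

```javascript
// libraries/logger/index.js: envoltorio propio sobre una utilidad de terceros
const winston = require('winston');

// si mañana cambias winston por otro logger, solo se modifica este archivo
module.exports.info = (mensaje) => winston.info(mensaje);
module.exports.error = (mensaje) => winston.error(mensaje);

// publicado como paquete privado (módulos privados, registro propio o paquete local),
// cualquier componente lo consume igual que cualquier otra dependencia:
// const logger = require('@mi-org/logger');
// logger.info('servidor iniciado');
```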