1. Launch and connect to an EC2 instance running Amazon Linux 2.
2. Elevate to root and edit /etc/ssh/sshd_config
## sudo vi /etc/ssh/sshd_config
3. Find the #Port 22 line (usually line 17); un-comment it and change the port to whatever you like.
## Port 9222
4. Save changes and exit
## :wq
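5. Restart sshd so the new port takes effect, and allow the new port in the instance's security group before you disconnect, or you can lock yourself out.
## sudo systemctl restart sshd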
// try here http://pdfmake.org/playground.html
var dd = {
  content: [
    {
      columns: [
        {
          // Base64 payload truncated here; paste the full data URI of your PNG.
          image: 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAMgAAABjCAYAAADeg0+z...'
        }
      ]
    }
  ]
};
- Install Git
- Install Nginx
- Set Up Nginx as a Reverse Proxy for Your Node.js Application (see the config sketch after this list)
- Install Node using NVM
- Install PM2
- Run a Dummy API Server Using Express
- Start the Server using PM2
- Auto-start PM2 After a Server Reboot
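A minimal sketch of the reverse-proxy and auto-start steps above; the app port (3000), config path, and server_name are assumptions to adjust for your setup.

# /etc/nginx/conf.d/node-app.conf
server {
    listen 80;
    server_name example.com;              # your domain or public IP

    location / {
        proxy_pass http://127.0.0.1:3000; # the Express app managed by PM2
        proxy_http_version 1.1;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
    }
}

# Start the app under PM2 and register it to survive reboots:
pm2 start server.js
pm2 startup    # prints a command to run once; it wires PM2 into systemd
pm2 save       # snapshots the process list PM2 should resurrect after reboot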
- Go to the terminal in your project folder.
- Run:
git config credential.helper store
- Run:
git pull
- Enter your username and password when prompted.
- Go back to SourceTree and run Fetch or Pull; it will not ask for your password again.
OR
You need to update the SourceTree config via "Preferences -> Git tab -> Git Version" by selecting "Use System Git"; then everything works.
Reading big files in Node.js is a little tricky. Node.js is meant to handle I/O tasks efficiently, not CPU-intensive computation. Such work is still doable, though I'd prefer languages like Python or R for it.
Reading, parsing, transforming and then saving large data sets (I'm talking millions of records here) can be done in a lot of ways, but only a few of them are efficient. The following snippet can parse millions of records without wasting much CPU (15% - 30% max) or memory (40 MB - 60 MB max). It is based on Streams.
The program expects the input to be a CSV file, e.g. big-data.unpr.csv.
It saves the result as ndjson rather than json, since huge datasets are easier to work with in the ndjson format.
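A minimal sketch of that approach, assuming only Node's core fs and readline modules; the naive comma split and the output file name are placeholders (quoted CSV fields need a real parser):

const fs = require('fs');
const readline = require('readline');

async function csvToNdjson(inPath, outPath) {
  const out = fs.createWriteStream(outPath);
  const rl = readline.createInterface({
    input: fs.createReadStream(inPath),
    crlfDelay: Infinity, // treat \r\n as a single line break
  });

  let headers = null;
  for await (const line of rl) {
    const cells = line.split(','); // naive: breaks on quoted commas
    if (!headers) {
      headers = cells; // first row names the columns
      continue;
    }
    const record = {};
    headers.forEach((h, i) => { record[h] = cells[i]; });
    // Respect write-stream backpressure so memory stays flat on millions of rows.
    if (!out.write(JSON.stringify(record) + '\n')) {
      await new Promise((resolve) => out.once('drain', resolve));
    }
  }
  out.end();
}

csvToNdjson('big-data.unpr.csv', 'big-data.ndjson');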
/**
 * Retries an async operation up to nthTry times before rejecting.
 * Takes a function that returns a fresh promise on each attempt;
 * an already-created promise settles once and cannot be retried.
 */
async function retryPromise(promiseFn, nthTry) {
  try {
    const res = await promiseFn();
    return res;
  } catch (e) {
    if (nthTry === 1) {
      return Promise.reject(e);
    }
    // Otherwise try again with one fewer attempt left.
    return retryPromise(promiseFn, nthTry - 1);
  }
}
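For example, retrying a flaky network call (placeholder URL; the global fetch assumes Node 18+):

// Each attempt invokes the factory again, so the request is actually re-issued.
retryPromise(() => fetch('https://example.com/api'), 3)
  .then((res) => console.log('status:', res.status))
  .catch((err) => console.error('failed after 3 attempts:', err));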
#listener {
  width: 45px;
  height: 45px;
  border: 1px solid black;
}
// Splits a lodash-style object path, e.g. 'a.b[0].c' -> ['a', 'b', '0', 'c'].
function splitPath(path) {
  const paths = [];
  let segment = '';
  for (let i = 0; i < path.length; i++) {
    const c = path[i];
    if (c === ']') continue; // '[' already ended the previous segment
    if (c === '.' || c === '[') { if (segment) paths.push(segment); segment = ''; }
    else segment += c;
  }
  if (segment) paths.push(segment);
  return paths;
}
export HOME="/Users/<USERNAME>"
export PATH=$HOME/bin:/usr/local/bin:$PATH
export PATH="/opt/local/bin:/opt/local/sbin:$PATH"
export PATH="$HOME/.yarn/bin:$HOME/.config/yarn/global/node_modules/.bin:$PATH"
# Path to your oh-my-zsh installation.
export ZSH="$HOME/.oh-my-zsh"
export LANG=en_US.UTF-8
export LC_ALL=en_US.UTF-8
.file-node-content {
  padding: 9px 4px;
  margin: 0;
  display: flex;
  align-items: center;
}
.file-tree-node > .file-node-children {
  padding-left: 12px;
}