diff --git a/docs/docs/tutorials/contextual-reranker/contextual_reranker_tutorial.ipynb b/docs/docs/tutorials/contextual-reranker/contextual_reranker_tutorial.ipynb
new file mode 100644
index 0000000000..7b57d5c212
--- /dev/null
+++ b/docs/docs/tutorials/contextual-reranker/contextual_reranker_tutorial.ipynb
@@ -0,0 +1,521 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Contextual AI Reranker Integration with DSPy\n",
+    "\n",
+    "This tutorial demonstrates how to integrate Contextual AI's instruction-following reranker with DSPy to improve retrieval-augmented generation (RAG) quality. We'll build a baseline RAG pipeline with DSPy, add instruction-following reranking on top of the same corpus, and compare the two quantitatively with DSPy's evaluation tools.\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Setup\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import dspy\n",
+    "import os\n",
+    "import requests\n",
+    "import ujson\n",
+    "import random\n",
+    "from dspy.utils import download\n",
+    "from dspy.evaluate import SemanticF1\n",
+    "\n",
+    "# Set API keys\n",
+    "# Get this key at http://app.contextual.ai/\n",
+    "CONTEXTUAL_API_KEY = \"your_contextual_api_key\"\n",
+    "OPENAI_API_KEY = \"your_openai_api_key\"\n",
+    "\n",
+    "os.environ[\"OPENAI_API_KEY\"] = OPENAI_API_KEY\n",
+    "\n",
+    "lm = dspy.LM(model=\"openai/gpt-4o-mini\", api_key=OPENAI_API_KEY)\n",
+    "dspy.settings.configure(lm=lm)\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Contextual AI Reranker Implementation\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 36,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "class ContextualReranker(dspy.Retrieve):\n",
+    "    \"\"\"DSPy retriever that reranks candidate passages with Contextual AI's /v1/rerank endpoint.\"\"\"\n",
+    "\n",
+    "    def __init__(self, api_key, base_retriever=None, k=5, rerank_instructions=\"\"):\n",
+    "        super().__init__(k=k)\n",
+    "        self.api_key = api_key\n",
+    "        self.base_retriever = base_retriever\n",
+    "        self.rerank_instructions = rerank_instructions\n",
+    "    \n",
+    "    def forward(self, query_or_queries):\n",
+    "        if self.base_retriever:\n",
+    "            # First-stage retrieval: pull a wide candidate set for the reranker to reorder\n",
+    "            initial_docs = self.base_retriever(query_or_queries)\n",
+    "            documents = initial_docs.passages\n",
+    "        else:\n",
+    "            # Without a base retriever, treat the input itself as the candidate documents\n",
+    "            documents = query_or_queries if isinstance(query_or_queries, list) else [query_or_queries]\n",
+    "        \n",
+    "        url = \"https://api.contextual.ai/v1/rerank\"\n",
+    "        headers = {\n",
+    "            \"accept\": \"application/json\",\n",
+    "            \"content-type\": \"application/json\",\n",
+    "            \"authorization\": f\"Bearer {self.api_key}\"\n",
+    "        }\n",
+    "        \n",
+    "        payload = {\n",
+    "            \"query\": query_or_queries if isinstance(query_or_queries, str) else query_or_queries[0],\n",
+    "            \"documents\": documents,\n",
+    "            \"model\": \"ctxl-rerank-v2-instruct-multilingual\",\n",
+    "            \"top_n\": self.k,\n",
+    "            \"instruction\": self.rerank_instructions\n",
+    "        }\n",
+    "        \n",
+    "        response = requests.post(url, headers=headers, json=payload, timeout=30)\n",
+    "        result = response.json()\n",
+    "        \n",
+    "        if 'results' not in result:\n",
+    "            # Fall back to the first-stage ranking if the API call fails\n",
+    "            print(\"Contextual API Error:\", result)\n",
+    "            return dspy.Prediction(passages=documents[:self.k])\n",
+    "        \n",
+    "        # Each result carries the index of the original document and a relevance score\n",
+    "        reranked_results = result['results']\n",
+    "        reranked_results.sort(key=lambda x: x['relevance_score'], reverse=True)\n",
+    "        \n",
+    "        top_docs = []\n",
+    "        for item in reranked_results[:self.k]:\n",
+    "            doc = documents[item['index']]\n",
+    "            if isinstance(doc, str):\n",
+    "                top_docs.append(doc)\n",
+    "            else:\n",
+    "                top_docs.append(doc.get('content', doc))\n",
+    "        \n",
+    "        return dspy.Prediction(passages=top_docs)\n"
+   ]
+  },
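+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Before wiring the reranker into a DSPy pipeline, here is a minimal direct call to the `/v1/rerank` endpoint, using the same request fields as the class above. The query and both candidate documents below are made-up examples for illustration, and a valid `CONTEXTUAL_API_KEY` is assumed.\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Sanity check against the rerank endpoint; the query and documents are illustrative\n",
+    "demo_payload = {\n",
+    "    \"query\": \"How do I restart a systemd service?\",\n",
+    "    \"documents\": [\n",
+    "        \"Run sudo systemctl restart <service> to restart a unit.\",\n",
+    "        \"The Linux kernel was first released in 1991.\",\n",
+    "    ],\n",
+    "    \"model\": \"ctxl-rerank-v2-instruct-multilingual\",\n",
+    "    \"top_n\": 2,\n",
+    "    \"instruction\": \"Prioritize actionable commands.\",\n",
+    "}\n",
+    "demo_headers = {\n",
+    "    \"accept\": \"application/json\",\n",
+    "    \"content-type\": \"application/json\",\n",
+    "    \"authorization\": f\"Bearer {CONTEXTUAL_API_KEY}\",\n",
+    "}\n",
+    "demo_response = requests.post(\"https://api.contextual.ai/v1/rerank\",\n",
+    "                              headers=demo_headers, json=demo_payload, timeout=30)\n",
+    "# Expected shape: {\"results\": [{\"index\": ..., \"relevance_score\": ...}, ...]}\n",
+    "print(demo_response.json())\n"
+   ]
+  },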
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Load Dataset\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 48,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Loaded 500 documents\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Download the RAG-QA Arena Tech corpus and keep the first 500 documents so the demo stays fast\n",
+    "download(\"https://huggingface.co/dspy/cache/resolve/main/ragqa_arena_tech_corpus.jsonl\")\n",
+    "\n",
+    "with open(\"ragqa_arena_tech_corpus.jsonl\") as f:\n",
+    "    corpus_lines = f.readlines()[:500]\n",
+    "\n",
+    "corpus = []\n",
+    "for line in corpus_lines:\n",
+    "    data = ujson.loads(line)\n",
+    "    corpus.append(data['text'])\n",
+    "\n",
+    "print(f\"Loaded {len(corpus)} documents\")\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 38,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Downloading 'ragqa_arena_tech_examples.jsonl'...\n",
+      "Training: 200, Dev: 300, Test: 500\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Load question-answer pairs for evaluation\n",
+    "download(\"https://huggingface.co/dspy/cache/resolve/main/ragqa_arena_tech_examples.jsonl\")\n",
+    "\n",
+    "with open(\"ragqa_arena_tech_examples.jsonl\") as f:\n",
+    "    data = [ujson.loads(line) for line in f]\n",
+    "\n",
+    "data = [dspy.Example(**d).with_inputs('question') for d in data]\n",
+    "\n",
+    "# Shuffle with a fixed seed, then split into train/dev/test\n",
+    "random.Random(0).shuffle(data)\n",
+    "trainset, devset, testset = data[:200], data[200:500], data[500:1000]\n",
+    "\n",
+    "print(f\"Training: {len(trainset)}, Dev: {len(devset)}, Test: {len(testset)}\")\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Setup Base Retriever\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Create base embedding retrievers: k=10 for direct RAG, k=50 to give the reranker a wide candidate pool\n",
+    "embedder = dspy.Embedder('openai/text-embedding-3-small', dimensions=512, api_key=OPENAI_API_KEY)\n",
+    "base_search = dspy.retrievers.Embeddings(embedder=embedder, corpus=corpus, k=10)\n",
+    "base_search_wide = dspy.retrievers.Embeddings(embedder=embedder, corpus=corpus, k=50)\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Create RAG Systems\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "class RAG(dspy.Module):\n",
+    "    def __init__(self, retriever):\n",
+    "        super().__init__()\n",
+    "        self.retriever = retriever\n",
+    "        self.respond = dspy.ChainOfThought('context, question -> response')\n",
+    "    \n",
+    "    def forward(self, question):\n",
+    "        # Accept a plain string, an Example with a .question field, or a dict\n",
+    "        if hasattr(question, 'question'):\n",
+    "            question = question.question\n",
+    "        elif isinstance(question, dict) and 'question' in question:\n",
+    "            question = question['question']\n",
+    "        context = self.retriever(question).passages\n",
+    "        return self.respond(context=context, question=question)\n",
+    "\n",
+    "base_rag = RAG(base_search)\n",
+    "\n",
+    "# RAG with Contextual AI reranking; changing the instruction changes the ranking behavior\n",
+    "contextual_reranker = ContextualReranker(\n",
+    "    api_key=CONTEXTUAL_API_KEY,\n",
+    "    base_retriever=base_search_wide,\n",
+    "    k=10,\n",
+    "    rerank_instructions=\"Prioritize documents that provide specific, actionable technical solutions and step-by-step instructions.\"\n",
+    ")\n",
+    "reranked_rag = RAG(contextual_reranker)\n"
+   ]
+  },
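+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The ranking behavior is controlled entirely by `rerank_instructions`. As a sketch, the cell below builds a second pipeline from the same components with a different, purely illustrative instruction; we don't evaluate this variant, it simply shows how the behavior can be redirected without touching the rest of the pipeline.\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Hypothetical variant: identical pipeline, different ranking behavior via the instruction alone\n",
+    "recency_reranker = ContextualReranker(\n",
+    "    api_key=CONTEXTUAL_API_KEY,\n",
+    "    base_retriever=base_search_wide,\n",
+    "    k=10,\n",
+    "    rerank_instructions=\"Prefer documents that discuss recent software versions and current best practices.\"\n",
+    ")\n",
+    "recency_rag = RAG(recency_reranker)\n"
+   ]
+  },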
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Evaluation Setup\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 41,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Setup the evaluation metric: SemanticF1 scores semantic overlap with the reference answer\n",
+    "metric = SemanticF1(decompositional=True)\n",
+    "evaluate = dspy.Evaluate(devset=devset, metric=metric, num_threads=4, display_progress=True)\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Test the Reranker\n",
+    "\n",
+    "Let's test our reranker with a simple example to see how it works.\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 51,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Query: How do I fix my Linux system\n",
+      "\n",
+      "Base retrieval results:\n",
+      "1. For some reason the disk my Mac was booting from was the macOS Installer Disk, all I had to do: Hold down the option (-alt) key before the Apple logo shows. Then just select your Macintosh HD (or how ...\n",
+      "\n",
+      "2. In addition to the preceding answers, which mention only Windows, and since theres a dup-closed question Does WannaCry infect Linux? pointing to this one, Id like to add that Linux machines can get in...\n",
+      "\n",
+      "3. Riffing off of Prabhats question above, I had this issue in macos high sierra when I stranded an encfs process, rebooting solved it, but this ps -ef | grep name-of-busy-dir Showed me the process and t...\n",
+      "\n",
+      "Reranked results:\n",
+      "1. You dont typically clear the journal yourself. That is managed by systemd itself and old logs are rotated out as new data comes in. The correct thing to do would be to schedule journald to only keep a...\n",
+      "\n",
+      "2. You need to give permission to the path. run this in command line and you will be fine to go. It worked for me: sudo chown -R $USER /usr/local...\n",
+      "\n",
+      "3. My resolution was similar to Roman Ts however I needed to add a few extra steps. In my case I had Ubuntu Server 14 VM running on a Windows 8 Desktop in Windows 2008 domain. If I tried NAT or Bridge I ...\n",
+      "\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Test query - something relevant to the tech corpus\n",
+    "test_query = \"How do I fix my Linux system\"\n",
+    "\n",
+    "print(f\"Query: {test_query}\\n\")\n",
+    "\n",
+    "base_results = base_search(test_query)\n",
+    "print(\"Base retrieval results:\")\n",
+    "for i, doc in enumerate(base_results.passages[:3]):\n",
+    "    print(f\"{i+1}. {doc[:200]}...\\n\")\n",
+    "\n",
+    "reranked_results = contextual_reranker(test_query)\n",
+    "print(\"Reranked results:\")\n",
+    "for i, doc in enumerate(reranked_results.passages[:3]):\n",
+    "    print(f\"{i+1}. {doc[:200]}...\\n\")\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Analyzing the Results\n",
+    "\n",
+    "Notice the difference in quality between the two retrievals, and in the responses they produce (shown next):\n",
+    "\n",
+    "**Base Retrieval Issues:**\n",
+    "- Retrieved documents about macOS booting issues (not Linux-specific)\n",
+    "- Included content about WannaCry malware (not relevant to fixing systems)\n",
+    "- Retrieved macOS High Sierra troubleshooting (wrong OS)\n",
+    "\n",
+    "**Reranked Retrieval Improvements:**\n",
+    "- Focused on systemd journal management (actual Linux system administration)\n",
+    "- Provided file permission fixes with specific commands (`sudo chown`)\n",
+    "- Delivered actionable solutions for common Linux issues\n",
+    "\n",
+    "**Impact on Response Quality:**\n",
+    "- **Base RAG**: Gave generic troubleshooting steps not specific to the retrieved context\n",
+    "- **Reranked RAG**: Provided concrete, actionable Linux commands with file paths and configuration details\n",
+    "\n",
+    "The reranker filtered out content about the wrong operating system and prioritized documents with \"specific, actionable technical solutions,\" exactly as our rerank instructions requested.\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 52,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Base RAG Response: To fix your Linux system, start by identifying any running processes that might be causing issues. You can use the command `ps aux` to list all processes. If you suspect a specific process is problematic, you can terminate it using `sudo kill -15 <PID>` where `<PID>` is the process ID. Additionally, check your disk partitions with `df -Th` to ensure they are correctly formatted and mounted. If you encounter file system errors, running `fsck` can help resolve them. If these steps do not resolve the issue, consider backing up your data and performing a clean installation of the operating system.\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Run the same query through both pipelines\n",
+    "base_response = base_rag(question=test_query)\n",
+    "reranked_response = reranked_rag(question=test_query)\n",
+    "\n",
+    "print(f\"Base RAG Response: {base_response.response}\")\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 53,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Reranked RAG Response: To fix your Linux system, first identify the specific issue you're encountering. If it's a permission problem, use `sudo chown -R $USER /path/to/directory` to change ownership. For network issues, ensure your network interfaces are configured correctly in `/etc/network/interfaces` and restart the networking service. If you're having trouble with packages, use `dpkg-deb` to manage them. For log management, adjust the settings in `/etc/systemd/journald.conf` to control log size. If you provide more details about the specific problem, I can offer more targeted advice.\n"
+     ]
+    }
+   ],
+   "source": [
+    "print(f\"Reranked RAG Response: {reranked_response.response}\")"
+   ]
+  },
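+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Before running the full evaluation, it can help to score a single example by hand. This is a minimal sketch that assumes the usual DSPy metric convention of calling `metric(gold_example, prediction)`:\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Score one dev example manually with the SemanticF1 metric defined earlier\n",
+    "example = devset[0]\n",
+    "prediction = reranked_rag(question=example.question)\n",
+    "score = metric(example, prediction)\n",
+    "\n",
+    "print(f\"Question: {example.question}\")\n",
+    "print(f\"SemanticF1: {score:.3f}\")\n"
+   ]
+  },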
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Quantitative Evaluation Results\n",
+    "\n",
+    "Now let's compare the performance of the two RAG configurations on our development set.\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Average Metric: 121.75 / 300 (40.6%): 100%|██████████| 300/300 [16:48<00:00, 3.36s/it]"
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "2025/10/13 12:36:26 INFO dspy.evaluate.evaluate: Average Metric: 121.75266058090479 / 300 (40.6%)\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "\n"
+     ]
+    },
+    {
+     "data": {
+      "text/plain": [
+       "EvaluationResult(score=40.58, results=)"
+      ]
+     },
+     "execution_count": 45,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "# Evaluate the embeddings-only baseline\n",
+    "evaluate(base_rag)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 54,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Evaluating Contextual AI reranked RAG system...\n",
+      "Average Metric: 127.27 / 300 (42.4%): 100%|██████████| 300/300 [20:48<00:00, 4.16s/it]"
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "2025/10/13 13:44:46 INFO dspy.evaluate.evaluate: Average Metric: 127.27461854472807 / 300 (42.4%)\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "\n"
+     ]
+    }
+   ],
+   "source": [
+    "print(\"Evaluating Contextual AI reranked RAG system...\")\n",
+    "reranked_score = evaluate(reranked_rag)\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Results Comparison\n",
+    "\n",
+    "| Configuration | SemanticF1 Score | Improvement | Description |\n",
+    "|--------------|------------------|-------------|-------------|\n",
+    "| **Base RAG (CoT)** | **40.6%** | Baseline | Embeddings retrieval + Chain-of-Thought |\n",
+    "| **Reranked RAG** | **42.4%** | **+1.8 pts** | Contextual AI reranking + Chain-of-Thought |\n",
+    "\n",
+    "### Understanding the Results\n",
+    "\n",
+    "**Why scores seem low:** SemanticF1 is strict: it penalizes different wording even when the meaning is correct. Scores of 40-45% are typical for RAG systems, since there are many correct ways to answer a question but the metric compares against a single reference answer.\n",
+    "\n",
+    "**What matters:**\n",
+    "With a corpus of only 500 documents, a 1.8-point gain is a solid result for a reranker. It indicates that the reranker consistently surfaces better context across the 300 dev questions, at minimal added cost and latency.\n",
+    "\n",
+    "**Why it works:**\n",
+    "- The reranker examines 50 candidate documents instead of 10\n",
+    "- It surfaces relevant technical content that embedding similarity alone ranks lower\n",
+    "- It follows the instruction to prioritize actionable, step-by-step solutions\n",
+    "\n",
+    "**Note:** In RAG research, gains of 2-5 points are meaningful when building on strong baselines. Larger corpora (1,000+ documents) leave more room for reranking to help, so gains there are typically larger.\n",
+    "\n",
+    "---\n",
+    "\n",
+    "**Key Insights:**\n",
+    "\n",
+    "1. **Instruction-Following Advantage**: Contextual AI's reranker lets you:\n",
+    "   - Customize ranking through natural language instructions\n",
+    "   - Adapt to new domains without retraining\n",
+    "   - Understand context (e.g., \"fix\" implies troubleshooting, not just semantic similarity)\n",
+    "\n",
+    "2. **When to Use Reranking:**\n",
+    "   - Domain-specific requirements (e.g., \"prioritize official docs\" or \"prefer recent content\")\n",
+    "   - Initial retrieval returns too many marginally relevant documents (**here, the corpus had only 500 documents**)\n",
+    "   - You need filtering by criteria embeddings can't capture (metadata, recency, source authority)\n",
+    "   - You work with larger corpora, where the improvement potential is higher\n",
+    "\n",
+    "The reranker's instruction-following capability means you can adapt it to new use cases just by changing the instructions: no retraining required!"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.13.7"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/ragqa_arena_tech_corpus.jsonl b/ragqa_arena_tech_corpus.jsonl
new file mode 100644
index 0000000000..9764333897
--- /dev/null
+++ b/ragqa_arena_tech_corpus.jsonl
@@ -0,0 +1,28436 @@
+{"doc_id": 229377, "author": "Gilles 'SO- stop being evil'", "text": "The primary command to manipulate deb packages is dpkg-deb. To unpack the package, create an empty directory and switch to it, then run dpkg-deb to extract its control information and the package files. Use dpkg-deb -b to rebuild the package. mkdir tmp dpkg-deb -R original.deb tmp # edit DEBIAN/postinst dpkg-deb -b tmp fixed.deb Beware that unless your script is running as root, the files permissions and ownership will be corrupted at the extraction stage. One way to avoid this is to run your script under fakeroot. Note that you need to run the whole sequence under fakeroot, not each dpkg-deb individually, since its the fakeroot process that keeps the memory of the permissions of the files that cant be created as they are. fakeroot sh -c mkdir tmp dpkg-deb -R original.deb tmp # edit DEBIAN/postinst dpkg-deb -b tmp fixed.deb Rather than mess with permissions, you can keep the data archive intact and modify only the control archive. dpkg-deb doesnt provide a way to do that. Fortunately, deb packges are in a standard format: theyre ar archives. So you can use ar to extract the control archive, modify its files, and use ar again to replace the control archive by a new version. mkdir tmp cd tmp ar p ../original.deb control.tar.gz | tar -xz # edit postinst cp ../original.deb ../fixed.deb tar czf control.tar.gz *[!z] ar r ../fixed.deb control.tar.gz You should add a changelog entry and change the version number if you modify anything in the package. The infrastructure to manipulate Debian packages assumes that if two packages have the same name and version, theyre the same package. Add a suffix to the debian_revision part at the end of the version number; for sorting reasons the suffix should start with ~, e.g. 1.2.3-4.1 becomes 1.2.3-4.1~johnjumper1. Instead of using shell tools, you can use Emacs. 
The dpkg-dev-el package (which is its own upstream as this is a native Debian package) contains modes to edit .deb files and to edit Debian changelogs. Emacs can be used interactively or scripted."} +{"doc_id": 6, "author": "Adam Dempsey", "text": "You can use StartSound.PrefPane which basically just sets the volume to 0 when you shutdown and then turns it back up after login."} +{"doc_id": 131078, "author": null, "text": "http://abtevrythng.blogspot.com/2010/06/adding-cer-certificates-on-your-android.html Shows how to actually achieve this. Worked fine for me. Try it out. In this article .cer to .pfx (which is what you need on Android) conversion is given. Simple method is given using which you can convert .cer to .pfx and use it to connect to the Wi-Fi network. Plus you dont need any Key to convert .cer to .pfx!!!"} +{"doc_id": 65554, "author": "user132360", "text": "In App purchases are not shared in Family sharing and its clearly given in apple website. But what we can do is log in to iTunes & App store using the ID which was used to buy the in app purchases without having to change the primary ID of the device and still enjoy the in app purchases"} +{"doc_id": 34, "author": "Shane Stillwell", "text": "I would recommend installing QuickSilver. Its an application launcher that will remember the applications you launch most frequent and recommend them first. Its easy to launch any application with a few keystrokes. QuickSilver is the first Application I install on every new Mac."} +{"doc_id": 35, "author": "Scott", "text": "Not built-in to the OS, but Ive been using a free utility called Visor. What you do with it is leave your Terminal running in the background, but Visor hides it and invokes it in a Quake-style console when you hit a (user-configurable) key combo. Its pretty customizable as to how your Terminal shows/hides. Super awesome."} +{"doc_id": 37, "author": "jdiaz", "text": "If you enable screen locking within the screen saver pref pane you can put the computer to sleep and lock at the same time by pressing \u2318+\u2325+F12"} +{"doc_id": 38, "author": "Josh K", "text": "\u21e7+\u2318+\u2325+Q will perform a quick logout. Another option is to enable a password when waking from sleep or screen saver and adding a hot corner for one or the other. Then locking is as simple as tossing a mouse in a corner."} +{"doc_id": 39, "author": "Scott", "text": "Via the Expose system preferences panel you can set a hot corner of your monitor that activates your screen saver. Say you set the Bottom Left corner, as soon as you move your mouse there the screen saver will invoke. If youve set a password on it, bingo, done. Yet another option is to enable Fast User Switching in the Login Options pane of the Accounts preferences panel. This puts a Users menu in the top right hand corner, from which you can quickly choose Login Window.... This kicks you to the login screen, requiring a password to move away from, and also does not end your session/quit any running apps."} +{"doc_id": 32805, "author": "TJ Luoma", "text": "I think the answer to your question is No, there isnt a command line way to do this. Because this is a menu bar item, its not something that you can access easily using Keyboard Maestro or another similar tool. However, if you are not averse to a solution using a 3rd party app, QuickLock will let you do this. Its a free (donations accepted) app which will let you assign a keyboard shortcut to lock the screen. Theres also a menu bar item which you can click to lock the screen. 
The app will let you set a password (separate from your account password). It does not require the use of password with screensaver, its all completely separate. You can see a video of it in action at http://www.youtube.com/watch?v=SBHwykPB19o p.s. when the screen is locked, type your password to unlock it. I was confused because I expected to see a password field. There is none. You just type the password. p.p.s. Ive only used this for a few minutes, so there may be some other issues with it Im not aware of, but it seems to fit the bill."} +{"doc_id": 44, "author": "zneak", "text": "Your Mac can ask your password after it wakes up if you set it to do so in the Preferences panel (Security). From there on, you can use \u2325+\u2318+\u23cf to put your Mac to sleep. So all you have to do is hit that and walk away."} +{"doc_id": 46, "author": "Nagon", "text": "On macOS High Sierra, there is a standard key sequence and Apple menu item to lock your screen. Control-Command-Q or ^+\u2318+Q For older OS, \u21e7+\u2303+\u23cf puts the display (only the display, not the whole computer) to sleep and will then prompt you for a password if you have enabled Require password [amount of time] after sleep or screen saver begins under System Preferences > Security. If your Mac does not have an \u23cf (eject) key, you can use \u21e7+\u2303+\u233d (power)."} +{"doc_id": 589874, "author": "WoJ", "text": "Of course it is not, as other answers pointed out. Your first efforts should go into changing the requesters mind. If you are, despite your efforts, somehow forced to provide your password - get that request in writing. You can then reply, also in writing, that you will provide the requester this information in a sealed enveloppe and that from that moment on you are not responsible for any actions performed via this account, whose password just became public knowledge at the request of management. It is likely that you have in the past accepted (directly or indirectly) that you are in charge of the account, which is accessed by a password you are the only one to know. You have also probably accepted that you would not share this account."} +{"doc_id": 557112, "author": "Randall Krieg", "text": "Heres another bit of evidence you can trust DDG if youre paranoid: they make it easy to control what information gets sent to the target host when you follow a link. Maybe you cant really know what theyre keeping in their logs, but you CAN know how they treat your interaction with the link targets, and you can control it if you like. Try this on both DuckDuckGo and Google, with Javascript enabled: 1. search for something 2. hover over one of the result links and check the address in the status bar 3. right-click one of the links, and look at the address in the status bar again On DuckDuckGo, the links are what they say they are. If you want to keep DuckDuckGo from seeing what youre clicking on, and if you want the target to be unaware that you came in via DuckDuckGo, you can do so easily: just copy the link in step 3, paste it into your address bar, and there you are -- no further interaction with DuckDuckGo and no referrer URL sent to the target site. On Google, however, note that in #2 they show you the link you expect, but in step #3 the link suddenly changes to a google.com address with a ton of gobbledeygook. This is also the address that gets used if you click the link normally. The only way to get the real target address directly is to hover over it and retype it yourself instead of clicking it. 
The Google client-side script is specifically designed to make sure you hit the Google tracking server before being redirected to the page you really wanted, and moreover, TO HIDE THE FACT THAT IT IS DOING SO from the vast majority of users. This is the action Google took a few years ago that finally caused me to use DDG exclusively. I understand Googles need to monetize its service, but when it purposely hides this fundamental mechanism they prove they are not trustworthy. So you can have your choice: use a site that says its not tracking you, and seems to be carrying out its promise; or use a site that tracks you every way it can, tells you its doing so, and makes it as hard as possible to evade it. The choice is obvious."} +{"doc_id": 62, "author": "Chealion", "text": "Taken from my answer at Server Fault: Mac OS X Memory Jargon: Wired : This refers to kernel code and such. Memory that should not ever be moved out of the RAM. Also know as resident memory. Shared : Memory that is shared between two or more processes. Both processes would show this amount of memory so it can be a bit misleading as to how much memory is actually in use. Real : This is the real memory usage for an application as reported by task_info() - a rough count of the number of physical pages that the current process has. (RSIZE) Private : This is memory that a process is using solely on its own that is used in Resident memory. (RPRVT) Virtual : The total amount of address space in the process thats mapped to anything - whether thats an arbitrarily large space for variables or anything - it does not equate to actual VM use. (VSIZE) Active : Memory currently labelled as active and is used RAM. Inactive : Inactive memory is no longer being used and has been cached to disk. It will remain in RAM until another application needs the space. Leaving this information in RAM is to your advantage if you (or a client of your computer) come back to it later. - Mac OS X Help Free : The amount of RAM actually available without any data. The best documentation I know of (and have been able to find in followup research) is Apples own Managing Memory article on their developer website. Other worthwhile sources: Darwin-dev mailing list: [1], [2] and an old article on MacOSXHints. Additionally Mike Ash has posted a good laymans introduction on his blog"} +{"doc_id": 163903, "author": "HRJ", "text": "None of the solutions mentioned here are working as of Feb 2018. So I created my own simple solution which I will document below. TL;DR: use AppBrains API. The long answer Create a folder called appSearch, for example. Sign up for an account on AppBrain. Then go to your dev dashboard. It will prompt you to enable the Developer option for your account. Navigate to the API access page. Copy the authentication field, which looks like di=xxxx&t=zzzzz and store the values into a file called appBrain.json in the following format: { DI : xxxx, TOKEN : yyyy } Now download filter.js and save it to the same (appSearch) folder. Install Java 8+ Run the following command: jjs -scripting filter.js -- keyword, where keyword can be any search term. You should see a list of 50 apps that match your search term and are sorted by the number of permissions, with a link to their play store page. Edit: The code in filter.js is relatively simple and can be tailored to suit your own criteria, if you know a bit of programming. I will try to make it more configurable in the future. The free tier allows limited number of queries per day. 
Still, you will be able to make about 30 searches per day, which is enough for personal use."} +{"doc_id": 64, "author": "Robert S Ciaccio", "text": "Real mem relates to physical memory (actual RAM modules in your computer). Virtual Mem is how much fake memory is allocated to the process, meaning memory that is allocated on the permanent storage medium (hard drive, solid state drive, etc) for that process. Shared memory is physical (Real) memory that can be shared with other processes. Private memory is real memory that can only be used by the process it is allocated to. These explanations may help as well... directly from activity monitor --> help --> viewing system memory usage: Here is an explanation of some of the information displayed at the bottom of the memory pane: Wired: Wired memory contains information that must always stay in RAM Active: Active memory that contains information that is actively being used. Inactive: Inactive memory contains information that is not actively being used. Leaving this information in RAM is to your advantage if you (or a client of your computer) come back to it later. Used: Used memory is being used by a process or by the system. Used memory is the sum of wired, active, and inactive memory. If the system requires memory it takes free memory before used memory. Free: Free memory is not being used and is immediately available. VM size: Virtual memory, or VM, is hard disk space that can be used as memory. VM size is the amount of disk space being used as memory. Mac OS X can use more memory than the amount of physical RAM you have. A hard disk is much slower than RAM, so the virtual memory system automatically distributes information between disk space and RAM for efficient performance. Page ins/outs: The number of gigabytes of information Mac OS X has moved between RAM and disk space"} +{"doc_id": 70, "author": "Asbj\u00f8rn Ulsberg", "text": "If you want to be able to remote control your Mac (with Synergy or something similar) even when its locked, Id recommend you to show the Keychain Status in the Menu Bar. You do that as follows: Launch the application Keychain Access Press \u2318, to open up the Preferences window Tick the Show Status in Menu Bar check box Click the newly appeared lock icon in the menu bar Click Lock Screen to lock the screen This will lock the screen with a login window, but still make the Mac possible to remote control. If you dont need to remote control the Mac, MacLoc is a simple and effective solution."} +{"doc_id": 262219, "author": "Andrew Henle", "text": "No, a UUID cant be guaranteed to be unique. A UUID is just a 128-bit random number. When my computer generates a UUID, theres no practical way it can prevent your computer or any other device in the universe from generating that same UUID at some time in the future. It may not be likely that any two UUIDs will be the same, but they can be. In this case, the code assigning UUIDs to devices may check a new UUID against any existing UUIDs already assigned to devices and throw away duplicates, guaranteeing uniqueness that way. Regarding uniqueness. When a new disk is installed to a system, the usual practice is for a technician to define the partitions thereon. Labels are not unique and are not used. In the definition process, a software algorithm creates a UUID. As the system already has a list of existing mounted UUIDs, the process of defining the partition automatically creates a unique UUID value. 
The new UUID value filtered against the existing list of mounted UUIDs so as to avoid duplication. With partition creation,on the host system that UUID coupled with the partition number form a unique combination. Yes really. Duplication could occur if the disk is prepared on system A for use on an existing system B."} +{"doc_id": 196692, "author": "Dimitry", "text": "This is more correct: find . -iregex .*\\.\\(jpg\\|gif\\|png\\|jpeg\\)$"} +{"doc_id": 426073, "author": "aceinthehole", "text": "Consulting/Contracting No one has mentioned the particular case of consulting/contracting. If you are in this category or you would like to be, this can definitely help you. You are easier to sell on a project if your boss or sales guy can pitch you as being certified in niche category X. That being said you are not necessarily better or more knowledgeable on a particular topic if you are not certified, and I agree with others who might take a dubious take on them in general. Although, having a few MS certifications I can say for you, if you crank on them on a technology that you are trying to learn, it really forces you to get up to speed."} +{"doc_id": 65630, "author": "Motsel", "text": "For all the googlers... check out Beyond Compare it rules. Costs 30$ or 50$ for Pro version."} +{"doc_id": 65632, "author": "Jaime Santa Cruz", "text": "The optimization feature is supposed to free up space automatically by keeping low res versions of your pictures. From the Apple support site: If you turn on Optimize [device] Storage, iCloud Photo Library will automatically manage the size of your library on your device, so you can make the most of your devices storage and access more photos than ever. All of your original, full-resolution photos and videos are stored in iCloud while device-size versions are kept on your device. You can download the original photos and videos over Wi-Fi or cellular when you need them."} +{"doc_id": 295009, "author": "Dennis Estenson", "text": "man pages can be very informative. Dont be intimidated by them. Among everything else, man less says you can use the R command to: R Repaint the screen, discarding any buffered input. Useful if the file is changing while it is being viewed. (I realize this question is over 6 years old, but it comes up on google searches, so Im not the only one that clicked the link to get here.)"} +{"doc_id": 32866, "author": "iolsmit", "text": "I use BetterTouchTool for that purpose and assigned a keyboard shortcut to Switch to login screen (which is lock screen): doesnt fulfill the command line requirement but may be helpful to you anyway."} +{"doc_id": 99, "author": "NReilingh", "text": "As of Snow Leopard, this actually is built into the OS. Launch Automator and create a service that receives no input from any application. From the Actions Library, add the Launch Application action to the workflow. Select the Terminal application in the drop-down list of Applications. Save your new service and then assign a keyboard shortcut to it in: System Preferences -> Keyboard -> Keyboard Shortcuts -> Services"} +{"doc_id": 295015, "author": "starfry", "text": "The shortest and simplest way to delete the first line from a file using sed is: $ sed -i -n -e 2,$p file.txt"} +{"doc_id": 107, "author": "intlect", "text": "ctrl+F2: Access the menu via the keyboard (Windows alt+space equivalent)."} +{"doc_id": 393325, "author": "MIA", "text": "Either work to well defined sprints, or deliberately choose a Kanban approach. 
Dont accidentally end up in Kanban Bugs first, features second. Still keep a focus on Value vs. feature bloat. (YAGNI over Gold Plating) Retrospectives are just as valuable. And just as importantly, make process changes in small chunks. Dont decide that today youre going start to go TDD, Mock and IoC in one shot unless you really have no external features to deliver ATM. Bring one in at a time. Ultimately, I define Agile really as doing what makes sense for your team and customer and not adhering to old practices because they happened to look like they worked in the past."} +{"doc_id": 110, "author": "Jacob Gorban", "text": "\u2318+E: Put selected text into search clipboard. Then \u2318 + G to find next. In combination with regular copy-paste you can do selective search and replace very quickly and conveniently."} +{"doc_id": 111, "author": "Eimantas", "text": "ctrl+\u2325+\u2318+8: High contrast theme. \u2318+` (backtick): Cycle through apps windows."} +{"doc_id": 114, "author": "robsoft", "text": "I havent noticed that sound on my MacBook Pro for ages, and today I figured out why. The MBP seems to remember 2 sets of volume settings; both for having-no-headphones-plugged-in, and for having-headphones-plugged-in. I usually have my external speakers plugged in-when Im at home, and when Im travelling/way from home obviously I dont. At some point in the past I have turned the volume down to zero when headphones werent connected, and now when I start the MBP up theres no sound. You could try this (though its not particularly practical) - turn the volume on your Mac right down to zero, then restart the computer. I suspect you wont hear the startup-sound. Like I say, not really practical but if the start-up noise annoys you enough, you might just get into the habit of turning the volume down before switching off. :-) EDIT: Just realised this this point about turning the volume down has already been made in other answers here, so feel free to ignore this!"} +{"doc_id": 262258, "author": "mrjamesmyers", "text": "My resolution was similar to Roman Ts however I needed to add a few extra steps. In my case I had Ubuntu Server 14 VM running on a Windows 8 Desktop in Windows 2008 domain. If I tried NAT or Bridge I could access the Internet but couldnt connect via SSH. If I tried Host Only Adapter then that would allow me to SSH to machine but couldnt access the Internet. I tried Port forwarding as well and no joy. Opened up Wireshark and it just wasnt finding VM. So my solution was to add a second network adapter. Method With VM powered down Click Settings > Network Click Adapter 1 and choose Bridged Adapter Click Adapter 2 and choose Host Only Adapter Click File > Preferences > Networks Under NAT Networks if you dont see a NAT Network click on + icon to add NAT Network. Click Host-Only Networks if you dont see a host only network click + icon to add one Start up VM In order to see network adapter you need to type ifconfig -a You may see the network adapter is added with a mac address but not an IP? If so then you need to edit /etc/network/interfaces in order to configure DHCP. Example below using VI/VIM but you can use editor of your choice sudo vi /etc/network/interfaces add the lines auto eth1 iface eth1 inet dhcp save and exit file. Then try restarting network service using below command sudo service networking restart Or if that fails then restart VM. 
Once restarted type below to see if you eth1 has been allocated an IP address ifconfig -a if so then see if you can SSH on to the VM"} +{"doc_id": 119, "author": "Am1rr3zA", "text": "\u2318+\u21e7+4: selective screenshot saved on desktop \u2318+\u21e7+ctrl+4: selective screenshot saved in clipboard"} +{"doc_id": 65657, "author": "William T Froggard", "text": "You can, but its a major security and stability risk. Doing so allows any application full access to your computer. You cant know what theyre doing with that access. Its unnecessary, and just really unsafe. For a lot more background information on this, see Why is it bad to login as root Why not run always logged in as root Why it is not recommend to use root login in linux"} +{"doc_id": 121, "author": "Studer", "text": "ctrl+A: Go to the beginning of the line (works in every Cocoa textfield) ctrl+E: Go to the end of the line (works in every Cocoa textfield) \u2318+\u21e7+H : Pop up the Home folder \u2318+\u21e7+D : Pop up the Desktop folder"} +{"doc_id": 65661, "author": "David Anderson", "text": "Generally you want to keep ownership of your personal files separate from the root user. This is why you create a account for yourself as an administrator. The accepted way, under OS X, to gain root level access is to use the sudo command from the Terminal application. For example, if you want to see the partitioning of your internal drive the command is gpt -r show /dev/disk0 which if entered will result in the following error message. gpt show: unable to open device /dev/disk0: Permission denied To use the command, you need to use sudo as shown below. sudo gpt -r show /dev/disk0 If you want to become the root user to avoid entering sudo, you can just enter sudo sh. The exit command can be used to exit from being the root user. If you want to execute an application as the root user, you can by using the Terminal application. For example, if you want to launch the Finder as the root user, enter the following command. sudo /System/Library/CoreServices/Finder.app/Contents/MacOS/Finder & To avoid the confusion of having two Finder applications open at the same time, it is usually best to quit your Finder application first. This can be done using the following terminal command. osascript -e tell application Finder to quit One word of caution: preceding a command with sudo is not the same as becoming the root user. For example, the commands sudo echo $USER sudo echo $SUDO_USER result in the same output as the commands shown below. echo $USER echo $SUDO_USER If you become the root user (the superuser), then the same commands result in a different output. This can be verified by entering the commands shown below. sudo sh echo $USER echo $SUDO_USER sudo echo $USER sudo echo $SUDO_USER exit"} +{"doc_id": 65662, "author": "Humberto Morales", "text": "Open the Terminal, you can find typing Spotlight in Terminal, and copy and paste this sentence there: rm -r /Users/$USER/Applications/Chrome\\ Apps.localized/"} +{"doc_id": 129, "author": "daefu", "text": "With the track pad, you can configure gestures for locking your mac. I do it with a four finger swipe left. To confiugre this, you need a (free) tool like the BetterTouchTool."} +{"doc_id": 131203, "author": "mattdm", "text": "Android shares very little with a typical Linux distribution. In fact, this is where Richard Stallmans GNU/Linux distinction comes in handy \u2014 Android isnt really a Unix-like general purpose operating system with a Linux kernel. 
Its a new system which happens to use the Linux kernel. This goes all the way down to its own custom libc implementation (called Bionic), which does not necessarily attempt POSIX compliance. This article from ZDNet covers a talk which gives a pretty good overview of the system, and although its a couple of years old its still basically correct and helpful."} +{"doc_id": 557187, "author": "sergiogarciadev", "text": "Yes, your company can monitor your SSL traffic. Other responses say that SSL is secure, indeed it is. But, your corporate proxy can intercept and inspect your encrypted traffic as denoted by the image below: This image is when I visit Google in my work computer. Here, they use the Forefront Threat Management Gateway 2010 which can intercept the connection between me and a secure site. Explanation: The SSL (Secure Socket Layer) and TLS (Transport Layer Security) security is based on PKI (Public Key Infrastruture). The PKI consists on a series of trusted certificates called root certificates. Here in my company, one of the root certificates is the certificate which Forefront generates the certificates for each website I visit. Because my computer trusts the certificate which the proxy used, no warning was generated and the connection is done securely but can be inspected by the proxy server."} +{"doc_id": 133, "author": "Tom H", "text": "\u2318+space: activate spotlight. Then you can launch any application, open most files, do quick calculations, etc."} +{"doc_id": 65686, "author": "David Richerby", "text": "Using your computer logged in as root all the time is like always carrying around all your keys, your passport, $5,000 in cash, that piece of paper with all your passwords written on it and the only photo you have of Flopsy, the adorable rabbit whose death broke your seven-year-old heart. Oh, and a chainsaw. Which is to say, its mighty convenient from time to time, because it means you can do whatever you want, whenever you want, without needing to go back home to get stuff or talk to your bank manager. But it also puts you at great risk of losing stuff, having it stolen (dont think that chainsaw will help you: youll be streets away before you notice your wallets gone), doing things you really regret later (impulse-buying plane tickets to Vegas while drunk), taking dangerous shortcuts (chainsawing through the lion enclosure fence because thats the fastest way to the pandas) and over-reacting (chainsawing your neighbours car because his dog barks too much). And, when you think about it, mostly, youre just going to the office, going grocery shopping, hanging out with your friends. You dont need all that stuff with you all the time just for the convenience of needing it, what?, once a month? Once a week? So, no, its not OK to use the root account all the time. It gives you a tiny amount of convenience but puts you in a lot of danger. Theres the danger of stupid mistakes having catastrophic results (Hey, why is rm -rf * taking so long to run? **** Im in /!). Theres the danger of acclimating yourself to the idea that all files are equal and you can just mess about with whatever you want, anywhere in the directory tree. Theres the danger that any hack to your account is immediately a hack to the whole system, so now every single piece of software on your machine is security-critical. 
And even if you think you dont care about your machine getting hacked (after all, that photo of Flopsy is a real piece of glossy paper, not some ephemeral JPEG), I care about your machine getting hacked because then its on the botnet thats mounting the DDOS attack against whatever internet service I cant access today. Root is your spiderman costume. It gives you great power but requires great responsibility. Its there in the closet whenever you need it, so you dont have to wear it all the time."} +{"doc_id": 65689, "author": "alexwlchan", "text": "Quoting from an Apple support article about Personal Hotspot (emphasis mine): When you connect a device to your Personal Hotspot, the status bar turns blue and shows how many devices have joined. The number of devices that can join your Personal Hotspot at one time depends on your carrier and iPhone model. I believe the typical limit is usually between 3 to 5 devices."} +{"doc_id": 98462, "author": "pepper_chico", "text": "Save the following AppleScript to a file named fullscreen.scpt: use framework AppKit use scripting additions repeat with runningApp in current applications NSWorkspaces sharedWorkspaces runningApplications() if runningApps isActive() set frontApp to (localizedName of runningApp) as text exit repeat end if end repeat tell application System Events tell process frontApp to set isFullScreen to value of attribute AXFullScreen of first window if frontApp = Finder tell process frontApp to set value of attribute AXFullScreen of first window to not isFullScreen else if isFullScreen do shell script lsappinfo setinfo -app & quoted form of frontApp & ApplicationType=Foreground tell process frontApp to set value of attribute AXFullScreen of first window to false (*fix to make sure the menu bar is not stuck*) delay 0.42 tell application Finder to activate tell process frontApp to set frontmost to true else do shell script lsappinfo setinfo -app & quoted form of frontApp & ApplicationType=UIElement tell process frontApp to set value of attribute AXFullScreen of first window to true end if end tell From terminal, compile it to an application with the following command: osacompile -o /Applications/Full Screen.app fullscreen.scpt Open the Full Screen.apps Info.plist (e.g. vim /Applications/Full Screen.app/Contents/Info.plist) and add the following to the dict: NSUIElement Add Full Screen.app as an exception in System Preferences > Security & Privacy > Privacy > Accessibility. Launch Automator and create a new Service. Change Service receives to no input in any application. Add a Library > Utilities > Launch Application action. Configure the action to launch the previously created Full Screen application. Save the service as Full Screen and close Automator. On System Preferences > Keyboard > Shortcuts > Services, scroll down to the bottom of the list and the just created Full Screen service should be listed there. Associate an unique Command shortcut for it, like Shift+Command+\\ or Command+F11 for example. This creates a shortcut to cause an application to enter full screen while removing the menu bar, or to exit full screen bringing the menu bar back. It provides an alternative full screen shortcut! For application-specific full screen launchers, check my other answer. Caveats There may be some disadvantages and/or misbehavior using this approach: It works by setting ApplicationType=UIElement, which causes the application icon not be added/highlighted in the Dock and make the application inaccessible via Command+Tab. 
The Command+Tab issue was reported in comments, I didnt notice it since I mostly use the Mission Control overview to change between full screen applications. It may not behave as expected for some specific applications, Ive noticed issues with the Activity Monitor application (which is generally not used full screen anyway) and theres a report on Chrome, which I didnt try since I use Firefox and it works great."} +{"doc_id": 426148, "author": "nsanders", "text": "I think having good challenges and learning opportunities is critical. Thats true when youre above the junior level too."} +{"doc_id": 426149, "author": "schwerwolf", "text": "The opportunity to work alongside experienced programmers."} +{"doc_id": 426150, "author": null, "text": "I personally like the office my company gave me."} +{"doc_id": 426151, "author": "Ryan Farley", "text": "I always love going to conferences and training and consider that a perk. Not all companies pay to have their devs continue to learn. Theres always more to learn. You benefit because they are learning more. They benefit from that too, but also have fun and get away from things for a couple of days and get to mingle with other devs."} +{"doc_id": 426152, "author": "Aaron", "text": "be flexible about the starting hour."} +{"doc_id": 426153, "author": null, "text": "Two flat-screen monitors, an optical mouse -- two things I dont currently have -- and each their own whiteboard with a few markers."} +{"doc_id": 426154, "author": "theo", "text": "Flexible Schedule Good PTO Program Fun & Exciting Technology/Toys Relaxed Work Atmosphere A great idea would be to let all your devs design their own workspaces. Different people need different environments to be productive."} +{"doc_id": 426155, "author": "Lucas S.", "text": "The opportunity to work alongside experienced programmers. And also the possibility of learn from them."} +{"doc_id": 426156, "author": "Peter Hilton", "text": "Philip Greenspun wrote about this once. He suggested making the office a better place to be than home, which is easier for young programmers. For example, domestic hardware that someone living alone cannot justify: expensive coffee machine, pool table, huge TV with DVDs to watch. Make the office more sociable: put beer in the fridge and have a drink together at the end of the day. Provide better food (easy for people who cant cook): get deli deliveries or a caterer."} +{"doc_id": 426157, "author": "BCS", "text": "Good hardware: Id be very interested if I was told that I would get a desktop system (WinXP is still my system of choice) and a Linux server box. Something I have root on and can run services on (local at a minimum, world visible would be nice.) A Virtual private server in the company data center instead of dedicated hardware would also work. Another thing that would be nice would be access to good references: We will buy you any books that are apropos to your job! same with software to some point, if its under $60, we will just get it. Edit: large screenS on pivot stands, good chairs, white boards, etc."} +{"doc_id": 426158, "author": null, "text": "The access to training and mentors. The things that Junior developers want is pretty much what every programmer that I know wants. They want to work in a relaxed and flexible environment with people who are at least as smart as them if not smarter. They want to feel like they are a part of something. They want to constantly be learning. Make sure that you have a training/book budget. 
Make sure that they are always learning and always have something interesting to work on. Make sure that you do team building or some kind of thing like that on a fairly regular bases. Lunch and learns are an increasingly popular tool these days. One thing that Junior Developers might like more than more Senior developers is the use of cutting edge or even bleeding edge technology. Be careful about this one, cause it can byte you in the butt, but it always helps."} +{"doc_id": 426159, "author": "shadit", "text": "Give them each a budget and let them configure their own computer setup. Make them submit a plan for what they intend to purchase. Talk over the plan with them. It will be a great way to kick things off. Give them a budget for a cell phone and unlimited plan that the company will pay for. Pay for their home Internet service. Little things like these they will show their friends to the response of, Cool - I wish my company did that!"} +{"doc_id": 426160, "author": "Brian", "text": "Treat them as peers"} +{"doc_id": 426161, "author": null, "text": "One nice perq we have here (beyond training, great environment, and the rest) is subsidized gym membership."} +{"doc_id": 426162, "author": null, "text": "give them responsibilities and some degree of freedom. make them feel like they are developing something for themselves, with passion"} +{"doc_id": 426163, "author": "MarlonRibunal", "text": "Being able to work remotely + flexible hours, Tech books give-a-way, and lots of love!"} +{"doc_id": 426164, "author": "BCS", "text": "Lets them, on company time, do some private projects (things that could be useful for the company, but things they get to pick)"} +{"doc_id": 131246, "author": "Wesley Wiser", "text": "Check out Official Android Developers Dashboards. Data as of September, 2017 1 Version Codename Distribution 2.3.3-2.3.7 Gingerbread 0.6% 4.0.3-4.0.4 Ice Cream 0.6% Sandwich 4.1.x Jelly Bean 2.4% 4.2.x 3.5% 4.3 1.0% 4.4 KitKat 15.1% 5.0 Lollipop 7.1% 5.1 21.7% 6.0 Marshmallow 32.2% 7.0 Nougat 14.2% 7.1 1.6% 1 Data collected during a 7-day period ending on September 11, 2016. Any versions with less than 0.1% distribution are not shown. (Note: Beginning in September 2013, devices running versions older than Android 2.3.3 do not appear in this data because those devices do not support the new Google Play Store app. As of August 2013, devices with versions lower than 2.2 accounted for about 1% of all devices.)"} +{"doc_id": 426166, "author": "Kenny Mann", "text": "Im currently slightly experienced but I still call myself junior. Here is what I appreciate of my employer: Buys me books. I have a diverse taste from C# to perl to C to Asm to database design to tsql etc. Book prices vary from $20 to $50. This usually requires a PO and approval and such. Allows me to critique current projects. Ive re-written a few project to be MUCH cleaner through the experience I gain. Each time I document why I made those changes. Every now and then I re-write my re-writes. Its amazing to see how much you change. I do this one on my own. I initiated it. A fast computer and a 24 monitor. This actually helps a lot, but for any developer. Less frustration and more code on the screen. Monitor also rotates for those kinds of days."} +{"doc_id": 426167, "author": "Mitchel Sellers", "text": "There are a number of things that come to mind, and not even for junior people. Training packages for use with conferences, certifications, or something similar. 
Showing a dedication to future growth in the field. Provide flexible starting times, especially for those just getting out of college and not used to working a day job. If in an environment where they must work from home, help them out a bit there: subsidize internet service and/or a company cell phone. If you must have access to them, giving them a way to do it helps."} +{"doc_id": 426168, "author": "Max Cantor", "text": "In my experience, good programmers want to program with as few distractions as possible. Some of these are more relevant to big companies, and I'm not sure where you work, but here are some examples: Casual dress code: Young programmers in particular will have a tough time avoiding resentment of a strict dress code. 'I'm just going to sit at my desk all day -- why do I need to wear slacks/polos/other uncomfortable business clothes?' In my opinion, this is half rebellion and half honest productivity-seeking: it really is much easier to program in jeans and a t-shirt than slacks and a formal button-down. The question you probably need to ask yourself is if the potential productivity gain and morale boost is worth the potential loss of professional atmosphere. It all depends on your situation... there are startups and Fortune 500 companies out there which allow jeans & t-shirts. Few meetings: Almost nothing is more distracting than a constant stream of meetings. Try to avoid team-wide status meetings that could be carried out via individual e-mails or conversations. Programmers like it when their employer lets them program. Experienced coworkers: Good programmers want to improve. If any of your other employees have contributed to big open source projects, or have worked individually on some particularly successful internal projects, let your prospectives know! Private offices: This is rarely practical anywhere but venture-capitalized startups, but if you can offer candidates their own offices, they'll leave the interview with hearts in their eyes. Programming is so much easier when you aren't distracted by foot traffic and people singing happy birthday one cube over. Cool stuff: If you can afford it, subsidize games for lunch breaks and post-work hang-out sessions. Best practices: This will ensnare good programmers and intimidate less experienced ones: show that your candidates will be working with reliable, sane version control, and that there are coding standards about unit tests or inheritance or anything. Organization is important. Don't nickel-and-dime: If you can be flexible with hours, do it! No one likes having to clock out every time they go to the restroom; it feels like you're not being valued as an employee. Dual monitors: Instant win for almost any programmer who's worked with dual monitors before."} +{"doc_id": 426169, "author": null, "text": "Work from home. (for voting)"} +{"doc_id": 426170, "author": null, "text": "Casual dress (for voting)"} +{"doc_id": 426171, "author": null, "text": "Private offices (for voting)"} +{"doc_id": 426172, "author": null, "text": "Good hardware (for voting)"} +{"doc_id": 32957, "author": "krishan", "text": "You can now simply go to System Preferences > App Store, and turn off 'Automatically check for updates'. No messing with firewalls, and it just works with the App Store. So just remember to check manually every now and then to find out when you have updates!"}
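The App Store record above describes the GUI toggle only. For script-minded readers, a minimal sketch of checking the same setting from the command line on macOS follows; the preference domain and key are my assumption about where this toggle lives, not something the quoted answer states.

```python
# A sketch, assuming macOS: read the system-wide auto-update-check flag.
# The preference domain/key below is an assumption, not part of the answer above.
import subprocess

result = subprocess.run(
    ["defaults", "read", "/Library/Preferences/com.apple.SoftwareUpdate",
     "AutomaticCheckEnabled"],
    capture_output=True, text=True,
)
# "1" means automatic checking is on; "0" matches the GUI toggle being off.
print(result.stdout.strip() or result.stderr.strip())
```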
+{"doc_id": 426174, "author": "CrashCodes", "text": "Admin rights to their PCs. An internet connection that's not gimped by bizarro proxy rules. Dual monitors. Work-from-home privileges. A soda fountain (not a drinking fountain that dispenses soda instead of water a la Brawndo, but like you'd use at the Taco Bell to refill your drink)."} +{"doc_id": 32958, "author": "David Holdeman", "text": "Safari being two processes: when a site has some JavaScript that hangs the web content process, I can still add new tabs, navigate in other tabs, etc."} +{"doc_id": 426176, "author": "benPearce", "text": "My company has purchased an O'Reilly Safari Online account for each of our developers. I have access to thousands of books online at any time. We also have training videos available online from CBT Nuggets, but I find their content limited. Also, some productivity tools for Visual Studio, such as CodeRush/Refactor Pro or ReSharper. Quality coffee in-house."} +{"doc_id": 426177, "author": "John Rudy", "text": "When I was just starting out, I benefited greatly from the mentoring of others in the office. It helped a lot, and I viewed it as a serious perk -- I was often quoted as saying, 'I'm getting paid to learn!' There are all the trivialities (games in the office, DVDs, etc.) -- I think that while they make for a great interview carrot, they're not a reason said programmers will stay. Indeed, once their work ramps up, they'll probably realize they have little time for those perks and wonder why the company even bothers. As a junior, learning from someone who respects you, is able to teach you and is able to lead you is very enticing long-term. It may not have the interview sex appeal that the others do, but it's something I think all serious developers did appreciate (or would have appreciated, if they didn't get it). Sponsor a corporate-wide subscription to Safari. Allow a junior dev to take 2 or 3 hours a day learning. Make him feel valued. Let him contribute. Which is another biggie: make him feel like part of the team, and give him projects which not only interest him, but also challenge him. Too often, the junior dev gets jobs like 'move control X to the lower right corner', or 'write all the property routines' (or getters/setters in Java/Obj-C/et al.), or 'add JavaScript validation'. Give him something to do which makes him feel useful, like a real contributor. He'll appreciate that, too -- and probably become more passionate about your firm and your practices. (BTW, my use of 'him' is not meant to be sexist; it's just a shorthand. Please expand it to him/her mentally.)"} +{"doc_id": 426178, "author": "Cade Roux", "text": "Good project management - with minimal BS and meetings under control. Good technical mentoring. Book reimbursement, resources, tools. And I take issue with the 'aside from cash': I think cash isn't really ranked up that high unless the environment is so poor - that's why they call it compensation."} +{"doc_id": 426179, "author": null, "text": "Invite your whole team to the restaurant of their choice every Friday for lunch. A former boss of mine used to do just that, and it really helped team bonding. If budget doesn't allow it, you can do it once every two weeks or once a month. But think of the value of having closer team members."} +{"doc_id": 426173, "author": "skiphoppy", "text": "The chance to devote time to learning.
Give them the chance to spend longer than expected on a task so that they can pore through books and search across the net to learn the best way to do things. Give them O'Reilly books. Encourage them to spend time reading them. Encourage them to make connections online and become familiar with sites such as this one, where they can learn the habit of trying to program well instead of trying to program just to get done. Yes, that's a perk. For them as well as for you. :)"} +{"doc_id": 426181, "author": "Asmor", "text": "Speaking as an actual college student (senior), here are some things I'd like: A degree of direction (tell me what you need done). A degree of autonomy (trust me to get it done). I'm probably unusual among my peers in that I prefer professionalism. As a general rule of thumb, I think casual dress would be very helpful, though it wouldn't be a huge issue for me personally. But really, the big thing is trust, and letting me do what you're paying me to do. If I think I'm going to be stuck attending constant meetings and always worrying about office politics, that's a big strike against you. Competence is also very important... I don't know if I could work for a manager who knew nothing about programming. I understand that it's entirely likely a great manager might not even be as good a programmer as I am, but they should at least know enough to know what's feasible and what's not. Oh, and probably the biggest thing for me: long-term prospects. I hate job hunting, and I'd tolerate an otherwise mildly intolerable job if I knew that I wasn't likely to be laid off, out-sourced, etc."} +{"doc_id": 65734, "author": "sudo", "text": "In case the other reasons weren't good enough... Don't forget that you can't use Homebrew as root (which is actually a huge pain). Other programs also don't let you use them as root, or run into permissions problems when you do, oftentimes for no apparent reason, because their programmers assume that they won't be run as root. I think Steam is one of them. It's also nice to have all the system and user stuff separate, for various reasons. I don't know if it's that bad of a security issue. I'd personally be more worried about problems with organization and permissions than anything else."} +{"doc_id": 426183, "author": "JB King", "text": "Casual dress code. Free pop (this was one that I really liked back in the dot-com days and miss sooo much). Flextime and telecommuting. Configuring their own machine w/ dual monitors and a budget. Benefits like health care, dental and vision - some of us like being able to get a discount on glasses or having our teeth checked. I would also suggest making sure there is a clear process for how work will be done, as junior programmers may not necessarily be aware of all the best practices and what kind of environment you want to give them."} +{"doc_id": 426180, "author": null, "text": "In addition to what has been said, make sure you have them work on stuff that has an impact on the business. If they feel that you value their work as a core part of your business, they might become much more engaged in their projects. If they do, that's the kind of developers you want full time."} +{"doc_id": 426185, "author": null, "text": "Experience with experienced programmers. Games, free food, free massages are just gimmicks (cough Google cough)"}
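Two answers in this corpus (sudo's Homebrew note above, and bodangly's list of root risks later on) argue against running tools as root. As a small illustration of the guard such tools effectively apply, here is a hedged sketch; the message text is invented.

```python
# A sketch of the root check tools like Homebrew effectively perform on
# Unix-like systems: refuse to continue when the effective UID is 0.
import os
import sys

if hasattr(os, "geteuid") and os.geteuid() == 0:
    sys.exit("Refusing to run as root; re-run as a regular user.")
print("Running unprivileged, as these tools expect.")
```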
+{"doc_id": 426186, "author": "Dipak Patel", "text": "I'm surprised the cynics amongst us haven't said non-brain-dead leadership! Attracting young people with toys is a bit patronising; better to say: 'Yeah, so we could offer you lots of new shiny toys, but how about we guarantee you no PHBs instead?' ;-)"} +{"doc_id": 426187, "author": "Yaba", "text": "Give them the choice of tools as far as possible. I know it's not always possible, but I guess there is nothing more demotivating than forcing a Linux guy to use Windows, a Mac guy to use Windows, or a Windows guy to use Linux. Of course that's not always possible, but also, what about favourite email clients? Some love Thunderbird, others Outlook, and others mutt."} +{"doc_id": 393420, "author": "Toon Krijthe", "text": "I have participated in two big rewrites. The first was a small project. The second was the main product of a software company. There are several pitfalls: rewrites always take longer than expected; rewrites have no direct effects/benefits for the customer; capacity devoted to rewriting isn't used to support the customer; you will lose functionality with a rewrite unless you have 100% documentation. Rewrites are seldom the real answer. You can refactor much of the code without losing anything and without a lot of the risk. Rewrites can be the answer if: you are switching to another language or platform; you are switching frameworks/external components; the existing codebase is not maintainable anymore. But I strongly advise the slow approach using refactoring. It's less risky, and you keep your customers happy."} +{"doc_id": 426188, "author": "Ben Collins", "text": "The best equipment: chair, monitors, modern workstation (e.g., nothing older than 2 years), ergonomic keyboard. Matching 401k (the higher the match, the better). Good mentoring. Freedom to pursue creative outlets related to work projects (i.e., 20% time). Update: after reading other answers, I think I'd also say: private office, individual book/training budget, HDHP with the amount of the deductible given at the beginning of the year in the form of an HSA."} +{"doc_id": 426190, "author": null, "text": "In my opinion these would be great perks for new programmers. Though they would also be awesome things to have for any programmer. :) Smarter and more experienced developers from whom you can learn. Good software engineering practices that are used throughout the company. Exciting projects (though this might just come along after you find that the developer is fit for the job at interview time). A friendly and supportive environment. Dual monitors. A comfortable chair (since you will be spending most of your day sitting down), and an ergonomic keyboard/mouse. A programming book library, and the chance to request more books to add to the collection. Lunchtime or after-work gaming sessions. A clean kitchen with a decent coffee machine. On top of that, there is an extra big plus for passing the Joel Test. I am not too keen myself to give/have an office of my own, mostly because lots of programmers are very sociable people, and it would be good to have some interaction during the day. However, that might just be a personal choice."} +{"doc_id": 393423, "author": "Ryan Hayes", "text": "It's time for a rewrite when: the cost of rewriting the application + maintaining the rewritten application is less than the cost of maintaining the current system over time. Some factors that make maintaining the current one more expensive: The language is so old you have to pay people that know it a lot of money to program in it (COBOL).
(from experience, unfortunately) The system is on a hardware architecture that is so old that they have to scour eBay and COLLECT parts to add to the machine it is running on, because they aren't made anymore. This is called hardware life support and is expensive, because as parts become more scarce, they (may) go up in price or they (absolutely) will eventually run out. It has become so complex that the '// Here be dragons.' comment is all over your code. You can't write any other projects and add new value to the company because you're always patching this ugly beast."} +{"doc_id": 426189, "author": null, "text": "There were lots of good suggestions already. I did a quick search on all the responses and I can't find these, so I'm including them: 1. Good health insurance coverage from the employer. 2. Paid time off. It really helps to re-boost employees."} +{"doc_id": 426191, "author": null, "text": "A decent manager, good training, and good motivation would be nice. In all of my past jobs, the training sucked, the managers didn't care, and they ended up motivating me right into a new job. Treat your employees well, and the perks will matter less. (But free food never hurts, either :))"} +{"doc_id": 426192, "author": null, "text": "One thing that would be very appealing is if an employer offered to sponsor one non-work interest for each employee. This could be something simple, like paying for karate classes or offering a small scholarship for those who are taking night classes for a graduate degree. I think that contributing to making an employee a more well-rounded person will actually pay dividends for the employer in the end. Team outings are fun, help bring people together and act as much-needed breaks when projects get intense. Offering even bi-monthly events could be a nice incentive."} +{"doc_id": 426194, "author": null, "text": "Lunches out - on the company, of course... with beers. After-work beers on Fridays. Beer is the key."} +{"doc_id": 426195, "author": null, "text": "I think the biggest perk for a new programmer is that when they first join the company they have a plan and know exactly what their career road map is. When I first started my current job I was given some interesting work right from the start, and I knew exactly what was expected of me. Other fresh graduates were left to school themselves up, which ultimately led them to lose interest in the work completely. Other gimmicks like a big screen etc. are great, but they don't make a boring job any better!"} +{"doc_id": 426196, "author": null, "text": "A chance to be part of a successful team."} +{"doc_id": 426198, "author": null, "text": "Interesting work. When I started programming many years ago, you got lumped with the crap work, as no one else wanted to do it."} +{"doc_id": 393431, "author": "Michael Meadows", "text": "Sorry, this is going to be long, but it's based on personal experience as both architect and developer on multiple rewrite projects. The following conditions should cause you to consider some sort of rewrite. I'll talk about how to decide which one to do after that. Developer ramp-up time is very high. If it takes any longer than below (by experience level) to ramp up a new developer, then the system needs to be redesigned.
By ramp-up time, I mean the amount of time before the new developer is ready to do their first commit (on a small feature). Fresh out of college: 1.5 months. Still green, but has worked on other projects before: 1 month. Mid-level: 2 weeks. Experienced: 1 week. Senior level: 1 day. Deployment cannot be automated, because of the complexity of the existing architecture. Even simple bug fixes take too long because of the complexity of existing code. New features take too long, and cost too much, because of the interdependence of the codebase (new features cannot be isolated, and therefore affect existing features). The formal testing cycle takes too long because of the interdependence of the existing codebase. Too many use cases are executed on too few screens. This causes training issues for the users and developers. The technology that the current system is in demands it: quality developers with experience in the technology are too hard to find; it is deprecated (it can't be upgraded to support newer platforms/features); there is simply a much more expressive higher-level technology available; the cost of maintaining the infrastructure of the older technology is too high. These things are pretty self-evident. When to decide on a complete rewrite versus an incremental rebuild is more subjective, and therefore more politically charged. What I can say with conviction is that to categorically state that it is never a good idea is wrong. If a system can be incrementally redesigned, and you have the full support of project sponsorship for such a thing, then you should do it. Here's the problem, though. Many systems cannot be incrementally redesigned. Here are some of the reasons I have encountered that prevent this (both technical and political). Technical: The coupling of components is so high that changes to a single component cannot be isolated from other components. A redesign of a single component results in a cascade of changes not only to adjacent components, but indirectly to all components. The technology stack is so complicated that future-state design necessitates multiple infrastructure changes. This would be necessary in a complete rewrite as well, but if it's required in an incremental redesign, then you lose that advantage. Redesigning a component results in a complete rewrite of that component anyway, because the existing design is so fubar that there's nothing worth saving. Again, you lose the advantage if this is the case. Political: The sponsors cannot be made to understand that an incremental redesign requires a long-term commitment to the project. Inevitably, most organizations lose the appetite for the continuing budget drain that an incremental redesign creates. This loss of appetite is inevitable for a rewrite as well, but the sponsors will be more inclined to continue, because they don't want to be split between a partially complete new system and a partially obsolete old system. The users of the system are too attached to their current screens. If this is the case, you won't have the license to improve a vital part of the system (the front-end). A redesign lets you circumvent this problem, since they're starting with something new. They'll still insist on getting the same screens, but you have a little more ammunition to push back. Keep in mind that the total cost of redesigning incrementally is always higher than doing a complete rewrite, but the impact to the organization is usually smaller. In my opinion, if you can justify a rewrite, and you have superstar developers, then do it.
Only do it if you can be certain that there is the political will to see it through to completion. This means both executive and end-user buy-in. Without it, you will fail. I'm assuming that this is why Joel says it's a bad idea. Executive and end-user buy-in looks like a two-headed unicorn to many architects. You have to sell it aggressively, and campaign for its continuation continuously until it's complete. That's difficult, and you're talking about staking your reputation on something that some will not want to see succeed. Some strategies for success: If you do, however, do not try to convert existing code. Design the system from scratch. Otherwise you're wasting your time. I have never seen or heard of a conversion project that didn't end up miserably. Migrate users to the new system one team at a time. Identify the teams that have the MOST pain with the existing system, and migrate them first. Let them spread the good news by word of mouth. This way your new system will be sold from within. Design your framework as you need it. Don't start with some I-spent-6-months-building-this framework that has never seen real code. Keep your technology stack as small as possible. Don't over-design. You can add technologies as needed, but taking them out is difficult. Additionally, the more layers you have, the more work it is for developers to do things. Don't make it difficult from the get-go. Involve the users directly in the design process, but don't let them dictate how to do it. Earn their trust by showing them that you can give them what they want better if you follow good design principles."} +{"doc_id": 426199, "author": null, "text": "A quality chair. Aeron chair: http://www.hermanmiller.com/hm/content/product_showroom/products/images/P_AER_L146_W.jpg"} +{"doc_id": 426200, "author": "Jerry", "text": "Don't throw them in with the general population. Give them a place with some degree of privacy, where they can concentrate and not be constantly distracted by phones, business conversations and foot traffic. Try to give them specified projects with finite, tangible requirements. Give them goals to achieve, instead of open-ended projects that leave them at the mercy of business types who refuse to ever commit to a specification. Have and enforce a change request policy. Have and enforce a clearly defined chain of command that requests have to flow through. Make sure they have more experienced programmers to aspire to and seek advice from. I would take these things over foosball tables and free soda any day."} +{"doc_id": 426201, "author": "Devin Jeanpierre", "text": "Hey, well, I'm still in university, so I guess I might be qualified to answer! I can tell you what would attract me personally to a job, but I can't really speak in general terms. For me, the most important thing is interesting work. I don't want to maintain a 40-year-old accounting system. I do want to do something challenging and fun. Maybe that's a bit much to ask for, but I would expect others to ask for it as well. I think this leads a lot of programmers into the game development industry, and apparently they get burned out there, so that's not cool; but that doesn't mean other development can't be fun. It would depend, obviously, on the person involved. I'd love to do things like image manipulation and simulations (and, yes, game development), but I haven't gone deep into other areas.
The number one thing pulling me into a job would really be the fun aspect; cheap things like a dedicated Wii room and comfortable clothes do help, but neither will make me want to take a job fixing the remaining Y2K bugs, or whatever else needs doing."} +{"doc_id": 393435, "author": "Jay", "text": "I think I'm in the only situation in my career where the big rewrite is the answer: company merger, huge overlap in systems functionality. Many, many systems have been merged and retired, and others are still in the process. I've inherited a system from the other company that still lives on. It has a huge code base, which used to support multiple departments very similar to our own, but with a completely different design and frameworks. There is only one business sector left using it, which makes enough money that it keeps this thing alive in a zombie state. All the old expertise is gone; there is no documentation. The support burden is high, with failures every week. It has not been merged into our company's systems, because our company never supported this particular sector of the business, so we don't have the functionality or expertise. It looks like this is the one case where the rewrite is needed. It looks like I'm going to have to figure out this behemoth, pull out the bits of functionality dedicated to this business, and rewrite the pieces that need to be added to our existing systems. Once that is done, our existing systems can support this new sector, and this beast can be put out of its misery. Otherwise I'm going to lose my sanity."} +{"doc_id": 426204, "author": null, "text": "Smart people and cool projects would attract the best programmers. IMO, if you rely only on monetary incentives, you'll most likely attract the wrong crowd."} +{"doc_id": 426205, "author": null, "text": "Freedom to make mistakes and learn. Knowledgeable and tolerant team members. Great hardware and a single widescreen monitor."} +{"doc_id": 426202, "author": null, "text": "The type of people you'd like to hire tends to be a first-order concern when deciding what sort of perks to offer. For the programmer who's thinking about or in the process of raising a family, paternity leave, company matching of adoption funds up to $X/year, flexible vacation and working hours, and a sense of job security may be much more attractive than a soda machine and free Segways for all. You mention that you're looking for junior or young programmers, but many young folks do still fall into this category. I sense, however, that by young, you might mean too young to be into that whole work-life balance thing. Let's call this The Google Strategy. The idea here is to make it so it just doesn't make sense to their analytical minds to ever leave work. Have on-site services like free food, drink, and laundry; provide gathering places for informal conversations. Make them feel like they're the rock stars of the company, and they'll repay you with long hours and hard work. The good news for you is that these types of perks don't cost you much at all relative to the increased hours they'll be willing to put in. The bad news is that this model tends not to be sustainable, and this dot-com-era irrational exuberance no longer satisfies your programmers when they start to want to take vacations, get married and go on a long honeymoon, have kids, and so forth. At that point, they want flexibility, more vacation time, a 401k, etc. Besides the first one, these all cost significant coin.
Here's the most important point, though: if you'd like to hire the absolute brightest people you can find, don't try to outsmart them. Odds are, the really sharp ones will be a little less interested in the size of the Free Red Bull Fridge and the number of air hockey tables at their disposal than in whether you'll value them as an asset to the company and as an individual (both in terms of compensation and employer/employee relations in general), whether you have a sustainable business model/plan, whether your work really excites them, and whether your work really excites you. I'd suggest reading a couple of essays on Joel on Software; he treats the subject of hiring good programmers in a fair amount of detail ('Smart, and Gets Things Done', I think, is the name of one of the essays). While your question certainly isn't without merit, and providing a work environment with some of the same perks as your competitors will make your sales pitch somewhat easier, the only people that will be truly swayed by these kinds of things are not the people you want the success of your small company to depend on. Good developers want to feel like they're making a contribution to something that matters, like their skills are valued and put to good use, like they are responsible to their peers and to themselves. Focus on having a truly great, dynamic company that does great work, and that treats its technical people with respect (things like private offices help here, too), and you'll really attract the type of people you're looking for. (Thanks to Thomas Kammeyer for a tip on the last paragraph!)"} +{"doc_id": 426203, "author": null, "text": "I am a recent graduate. In my opinion, the most appealing perk for me is having an interesting project to work on. I don't want to be writing simple in-house enterprise applications all day. This may be someone else's idea of fun. However, it is not mine."} +{"doc_id": 426206, "author": "coder1", "text": "Programmers need vacation. Lots of it. Four weeks a year to start. Minimum."} +{"doc_id": 426208, "author": null, "text": "The option to install whatever software you need to get the job done. Notepad++, Pownce or whatever."} +{"doc_id": 426209, "author": "Rob", "text": "Here's something: don't leave them in the dark when they are just starting. They will be very uncomfortable if they have no direction when they start. Make sure they have very, very clearly defined tasks with measurable deliverables. When I first started, I was thrown into a mess of a product with no direction and told to fix bugs that made absolutely no sense to me. Find somewhere appropriate for them to work and make sure you give them what they need to contribute positively. Otherwise you're just going to have a bunch of college kids surfing the web on your dime."} +{"doc_id": 426210, "author": "coder1", "text": "A career path. Not that they necessarily have to follow it, but give them the thought that they don't have to be a junior forever, and show them that there are opportunities in the company. Give them an idea of what it takes to advance."} +{"doc_id": 426211, "author": null, "text": "Training is by far the #1 thing. It was when I was starting out. Company funding for books and/or conferences. Time to work on projects that might not directly be a product but can help in advancing skills (and could possibly turn into a product). Time with senior-level developers/mentors."} +{"doc_id": 426212, "author": "Joshua Carmody", "text": "Be flexible with office hours.
If a programmer gets his best work done between 1:00pm and 10:00pm, or he has other classes or some other reason to need flexible hours, why force him to work 9:00-5:00? Naturally you may need programmers in the office at certain times for mentoring/training/code review/important meetings, etc. But most programmers appreciate flexibility where it can be found."} +{"doc_id": 426214, "author": "rpattabi", "text": "There is nothing like the company of an experienced fellow programmer guiding the new programmer. I am always thankful to my very first mentor when I entered software development. (Thanks Chris!)"} +{"doc_id": 426213, "author": "JohnMcG", "text": "A boss who would ask this question."} +{"doc_id": 426215, "author": "Nathan Koop", "text": "I would argue against private offices; I would promote more of an open-office concept with war rooms, so that the newbs can ask a more experienced person quickly & easily. But keep the rooms smaller: five or fewer people. Also, dual or triple monitors is a must."} +{"doc_id": 426217, "author": null, "text": "Perks? Mentors: The single greatest asset I was given. Someone who showed me the ropes, listened to me, took me aside when I messed up, and explained why (not how) things were done. Someone who had knowledge of the product (not an HR/PR person), or could distill something in ten minutes or less. Sometimes new people are afraid to ask questions. Goals & salary: When your programmers start, have them write down three goals they'd like to achieve in three months. They don't need to be climb-Mount-Everest, write-a-compiler type goals. But they must be measurable. It's a great tool to find motivated people. Fitness bonus: Where I work, if you can accumulate 500+ km in one year biking to work, the company will write you a check for $500, just like that. It's a great way to encourage this whole being-green thing, and it helps relieve stress and saves money. The best tools: Provide programmers with the best tools. I can't tell you how much resentment I felt when I was told that VS2003 was too expensive, but all the sales staff had BlackBerrys. It made me feel undervalued, and I eventually quit. Perk time: Allow your coders 20% of their time to work on their own projects. It's a great way to spur ideas, and it helps keep people motivated."} +{"doc_id": 426216, "author": "MikeJ", "text": "Perks that I have liked: 1) a book budget to get technical books related and unrelated to the job 2) an assigned mentor - someone more senior to help show me the ropes and tell me about the culture 3) a pop/snack area with minimal (better: no) cost to staff 4) notebooks, wifi and a lounge where you can be more relaxed when you aren't coding hard but are still working on things like email. Our company has 4 of them that you can pick up in the lounge, and you can curl up on the couch and read mail etc. during lunch or during an unwind time 5) a budget for movie tickets, dinner out etc. to give to staff after they have done a grinder or delivered a key element on time - anything to make them feel special and remembered for hard work"} +{"doc_id": 426218, "author": "Jeremy Cantrell", "text": "In my opinion, the best perk a new programmer can have is a good mentor who is extremely knowledgeable and understanding."} +{"doc_id": 426220, "author": "Jim In Texas", "text": "I think private offices are overrated, especially for junior developers.
OTOH, managers must understand that every time a developer is distracted by noise, people walking around them, or being in a huge bullpen or a sea of cheap cubes, it costs the firm money in the near term. Good work areas, especially good chairs and monitors, make a huge difference. Any kind of dress code beyond 'naughty bits must be covered' is insane when applied to developers. Having non-flexible work hours is insane when applied to developers. In general, what is known in management theory as Taylorism is a good way to drive away the best developers. All developers, especially junior developers, appreciate formal training opportunities."} +{"doc_id": 426221, "author": null, "text": "A shower on the premises, so that employees can jog/cycle to work."} +{"doc_id": 426222, "author": null, "text": "Free coffee. Good nearby food. Well-stocked library."} +{"doc_id": 426223, "author": null, "text": "Apart from the hard stuff like offices, tools, gear, food and snacks, I'd like to add something that makes me feel special: let your developers in on decisions! If you're getting new tools for them, or moving, or starting a new project, or even hiring new people - let your developers in on those decisions. It's only fair you get a say in who your new coworker is, or what the next big thing you are going to work on for a few years is. One way to do this is to conduct meetings in a round-table fashion, where you specifically ask every attending person for their opinion, not just let them speak up if they wish."} +{"doc_id": 426224, "author": null, "text": "Besides money, the greatest attraction for a new developer would be an experience that will allow him/her to build a career on a strong footing. A developer can get this experience by working in an environment that will allow him to learn, improve, and strive to achieve challenges; where quality (of code, documents, etc.) has some value; where best practices are followed; where people look for a better solution; and, most important of all, no internal politics."} +{"doc_id": 65773, "author": "bodangly", "text": "Just a few examples why it's not OK to always run as root: The root user can easily place files in locations that are far more difficult to track down. The root user has raw access to interfaces and so can put an interface into promiscuous or monitor mode and log ALL network traffic. The root user has raw access to device nodes and can thrash a disk, making it far harder to recover files than a simple rm at user level. The root user can also potentially modify boot sectors of a drive, making malware persistent even after a reinstall of the operating system. I could go on. The point is that there ARE good reasons not to run as root. I don't disagree that for most people their most personal data is in their home directory anyhow, but running as root still does put that data, and the entire system, at greater risk. The advice to not run as root is not misplaced. If a person does not understand the implications of running as root, they definitely should not be doing so. Suggesting anything else is irresponsible."} +{"doc_id": 426226, "author": "prasanna", "text": "Being a college student who will go looking for a job in a few short years, I'd say it's definitely: Casual dress code -- why does my dress matter when I can program well enough? Mentoring -- some older, wiser programmers to guide you. I'd just have been out of college, used to having a professor around the corner or a TA to throw questions at.
Friendly/productive atmosphere -- I'd like to have people who will discuss code after their job and not make me go to really stupid meetings that don't get things done. Boss that understands programming -- I've been surrounded by CS people who think in similar ways and understand me. I'd want my boss to be similar. Gym/fitness membership -- it just helps to vent the pressure of programming. Some resources to work on my own projects -- I would want to do some of my own things, even after office hours if required. I'd be glad to use company resources. Please, please, root on my PC... or admin -- I know what I do, please give me rights."} +{"doc_id": 426227, "author": null, "text": "Simply follow Jeff Atwood's (PBUH) Programmer's Bill of Rights and they will come. It doesn't hurt to provide abundant caffeination infrastructure as well :)"} +{"doc_id": 426228, "author": "Jasper Bekkers", "text": "These are all personal :-). Free coffee. I have solved countless problems while waiting for my coffee to finish, or even walking to the coffee vending machine. Laptops. I don't care about the fancy dual-monitor setups everyone keeps mentioning, because I usually end up working on only one of them anyway. However, having a laptop and being able to work from any part of the company is more valuable to me. I can just take my problem with me, and it makes it easier for me to demonstrate what is going on to a colleague. Smoking area. I smoke, and although I don't smoke that much, it's really nice to actually spend five minutes somewhere else. The most interesting discussions I have with peers are usually while smoking. Open office. I don't like to sit in an office, by myself, for a prolonged length of time, because it makes me feel like a machine. To me, interaction with peers is a huge motivation to go to work. Whiteboard and artistic people around. If there are any web designers, 3D modelers, sound guys or whatever type of artsy people you can find, put them in the same room as the programming/tech guys. This too makes the job seem less mechanical. No dress code. I'll quit the day someone tries to make me wear a suit. They honestly don't make me feel comfortable; besides that, I probably wouldn't fit in such a formal culture anyway. Besides that, I'm a pierced-up coding goth who delivers the best work when I don't have to worry about something other than code. That includes clothing. Learning opportunity. Doesn't matter what; it could be seminars, peer reviews, books, research time, anything goes. If the job requires concurrent programming: a dual-core machine at least. A stash of Ritalin, lol. I don't care about: Dual-monitor setups. As stated previously, they distract me, so I tend to prefer widescreens. Fast hardware; it's hard to actually get slow hardware these days. Gadgets. Free internet at home, or a cell phone. I already have those. The editor, IDE or OS I have to use, as long as I can figure out how to work with it in an hour or two (it usually takes less time, though). Huge paychecks. Give me a pleasant working environment where I'm happy to be for the biggest part of the week, and I'm happier than when I have a huge pile of money stashed away at the bank. Use that cash to improve the office conditions.
Game rooms, guitars, pool tables, foosball or air hockey tables, et cetera."} +{"doc_id": 426229, "author": null, "text": "Well, working on challenging and interesting projects, being respected and not being ignored (some junior developers are just forgotten in a corner of the office) can be better than throwing them games and gadgets."} +{"doc_id": 426230, "author": null, "text": "I'm a current college student, graduating in about a year, and the only thing that matters is respect. Money, hours, Aeron chairs, multiple monitors, admin rights to your own computer, private office, telecommuting rights: these all represent the same thing: the employer views you as a real employee. Clock-ins, lowball offers, drug tests, cubicle farms, folding chairs, etc.: these all represent the opposite: the employer views you as a stupid little kid. The most intelligent and hardworking graduates are probably not as interested in the free soft drinks and game lounges as they are in the idea that they will be viewed as important contributors, both to your company and to the field of software engineering at large."} +{"doc_id": 426231, "author": null, "text": "Independence, and a feeling that their inputs matter. Work from home. Allow for personal work at the office (initially there might be a lot of wasted time; slowly it will come down automatically). Casual dress code. Laptops, not workstations. Creative projects. Allow them to work on other things not limited by their work profile (like a new programmer would cherish the idea of having the liberty to directly interact with the clients and understand/solve problems). All this would be great for them, and they would think twice before leaving, as they would feel such a place would not be available elsewhere."} +{"doc_id": 426232, "author": "mscccc", "text": "Gym membership. Video games. Dual monitors. 4 weeks+ of vacation. Flexible starting hour. If no private office, then noise-cancelling headphones. And MOST importantly, other people their age to work with. When you are 22-23 years old, it is really hard to relate to your coworkers when they are all talking about their kids/families."} +{"doc_id": 458998, "author": "HLGEM", "text": "There is no one right answer to this. It depends on what you use the database for. In an enterprise application, you need the logic in the database through foreign keys, constraints, triggers, etc., because it is the only place where all possible applications share code. Further, putting the required logic in code generally means the database is inconsistent and the data is of poor quality. That may seem trivial to an application developer who is only concerned with how the GUI works, but I assure you that the people trying to use the data in compliance reports find it very annoying and costly when they get billion-dollar fines for having data that didn't follow the rules correctly. In a non-regulatory environment, when you don't care as much about the whole set of records and only one or two applications hit the database, maybe you can get away with keeping it all in the application."}
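HLGEM's answer above argues for keeping integrity rules in the database itself, where every application that touches the data must obey them. Here is a minimal, self-contained sketch of that idea, using SQLite from Python's standard library; the schema is invented for illustration.

```python
# A sketch of "logic in the database": a foreign key plus a CHECK constraint
# that reject bad rows no matter which application wrote the INSERT.
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("PRAGMA foreign_keys = ON")  # SQLite enforces FKs only on request
conn.execute("CREATE TABLE customers (id INTEGER PRIMARY KEY)")
conn.execute("""
    CREATE TABLE invoices (
        id INTEGER PRIMARY KEY,
        customer_id INTEGER NOT NULL REFERENCES customers(id),
        amount_cents INTEGER NOT NULL CHECK (amount_cents > 0)
    )
""")
conn.execute("INSERT INTO customers (id) VALUES (1)")
try:
    # Violates both the FK (customer 99 does not exist) and the CHECK (-5 <= 0)
    conn.execute("INSERT INTO invoices (customer_id, amount_cents) VALUES (99, -5)")
except sqlite3.IntegrityError as exc:
    print("Rejected by the database, not by application code:", exc)
```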
+{"doc_id": 426234, "author": "slim", "text": "Bright colleagues. Interesting challenges. Flexitime. Freedom to fail (if you never fail, you're not being challenged enough). Freedom to innovate (i.e. an organisation that doesn't stonewall ideas from juniors). Google-style 20% time -- or something similar. The sense that attending conferences and education is encouraged, not merely allowed. Casual dress code. Dining facilities on site or very nearby. I would suggest that working from home should not be the norm for junior hires - they need face-to-face contact in order to become part of the team. It's good if they have the facilities to work from home in order to do out-of-hours work, or have occasional home days."} +{"doc_id": 557306, "author": "Duncan Jones", "text": "The other answers do an excellent job explaining the maths behind the key exchange. If you'd like a more pictorial representation, nothing beats the excellent paint analogy shown on the Diffie\u2013Hellman key exchange Wikipedia entry. (Image is in the public domain.)"}
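The paint-analogy answer above points at a picture; for readers who want the arithmetic behind it, here is a toy Diffie-Hellman exchange with deliberately tiny numbers (real deployments use groups of 2048 bits or more).

```python
# A toy Diffie-Hellman run; all values are tiny on purpose and insecure.
p, g = 23, 5                         # public modulus and generator
a, b = 6, 15                         # Alice's and Bob's private exponents
A = pow(g, a, p)                     # Alice sends g^a mod p  -> 8
B = pow(g, b, p)                     # Bob sends g^b mod p    -> 19
assert pow(B, a, p) == pow(A, b, p)  # both sides derive the same secret
print(pow(B, a, p))                  # shared secret: 2
```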
+{"doc_id": 426236, "author": "BenAlabaster", "text": "Some of these have been mentioned before, while others seem to have been skipped over... A Bluetooth headset - preferably one that multi-pairs with my desk phone and my cell phone and lets me listen to music in stereo. Less is more, right? I don't want to have to keep switching headsets to answer different phones or listen to my music, and I definitely don't want to have to hold the receiver while I try to continue my daily work - and I don't want half a dozen gadgets cluttering up my desk; the fewer the better. O'Reilly subscription - I think this costs me $40 a month, which I'd rather not pay for myself, but I refuse to live without it, so I do. MSDN license - the one with all the nifty stuff like Expression Studio, Visual Studio Team Edition, etc. This currently costs me a small fortune; it would be nice if it came as a perk of my job! Software - don't give me hassle about purchasing software that will make me more productive when I ask - XmlSpy, Icon Workshop, ReSharper/CodeRush - just buy it and bring it to me when it arrives. As for the small amount of $$$ it costs: by the time you've wasted a half hour of my time having me write up justification and you've wasted another 10 minutes reading it, we've just spent more than the cost of the software. Flex time/telecommuting - if I arrive late, chances are I didn't leave until late last night; don't quiz me like a five-year-old about where I was at 8:30 when everyone else arrived! Where were you and everyone else at 2am when I left? Give me leeway to be myself. Putting my feet on my own desk is perfectly acceptable behaviour, as is listening to music, eating, having pop on my desk, etc. As long as I'm not disturbing anyone else's workflow and I'm meeting all my deadlines/objectives, that's all that matters. Home internet connection and VPN privileges - for those work-from-home days. Time to think - without questioning what I'm doing instead of working - we're programmers; thinking is working. What's more, that's what you pay us for. Bookshelf - for all my books. Books - to put on said bookshelf. No micromanaging - I'm an adult, I don't need micromanaging! Give me a task and some kind of idea of the direction you want me to take, and leave me to do what you hired me for. If you wanted to do the job yourself, be my guest; I can always find something else to do. If I need help, I'll ask. A forum for answering questions/learning. Training/seminars/further education (i.e. master's degrees, PhDs, etc.). Life insurance policy. Stock options. RRSP/401K. Occasional team-building days - sailing, war games, paintball, whatever you like. And if you wanted to throw in a couple of nice personal perks: gym membership, golf club membership."} +{"doc_id": 295165, "author": "ANTHONY", "text": "Type: history -c; rm ~/.bash_history. history -c clears your current session's history; rm ~/.bash_history clears your long-term history; so history -c; rm ~/.bash_history clears both."}
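ANTHONY's answer above is pure shell. For the long-term half only (removing the history file), a rough Python equivalent looks like this; note that `history -c` has no file-level counterpart, since it clears the running shell's in-memory list.

```python
# A sketch mirroring `rm ~/.bash_history`; the in-session history still
# needs `history -c` inside the running shell itself.
from pathlib import Path

hist = Path.home() / ".bash_history"
if hist.exists():
    hist.unlink()
    print(f"removed {hist}")
```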
+{"doc_id": 426238, "author": null, "text": "Quality chairs. A developer spends a lot of time during the day sitting. While a good-quality adjustable chair may seem expensive, it's cheaper than having a developer miss work because their back is injured from sitting in an Office Depot $79 special. In-office catering. It doesn't have to be covered by the company, but having a secretary make a lunch run for the office is a great benefit. Not only does it enable the developer to work through their lunch, if they need/want to, but it helps cut down on that time lost before lunch where everyone tries to coordinate about who's going where. Dual monitors, or one large (30+ inch) high-resolution widescreen-format LCD. The productivity gain from having multiple monitors is amazing. Imagine a secretary having to work in an office with only a single file cabinet with just one drawer. That's what development on a single 17-inch 4:3-aspect-ratio monitor is like. Quiet. Even if you can't afford private offices for the developers, providing the developers with a space separate from marketing and people whose jobs are to talk to your customer base, or the sales team, is very important to a developer. A developer has chosen to work with computers, and not people, because they are likely not an extrovert. Therefore, keeping them sheltered from the sales team's pep talks and team-building exercises will be very valuable. If you have to have a giant open floor plan for the entire business, look at getting some banners or sound dampening to hang from the ceiling. Respect. Your developers are building the tools that your company uses to be more profitable. They may be making the software you sell, or the software that gives your company the advantage you need to be competitive; treat them with respect. Books. Developers need knowledge like plants need water. If a developer isn't given an outlet to learn new techniques and practices, they will search for it themselves. Give your developers a quarterly library fund, or have a company library they can get books from and request new books be added to. You can create an internal website with which the developers can vote for new additions to the library, and buy them once a quarter. A subscription to an online library resource like Safaribooks.com. A sense of being appreciated. You chose to hire these particular developers for a reason. Make them feel like they are special in some way. Have a quarterly/monthly guest speaker, as you can afford it. If you can't afford a guest speaker, send some of them to conferences and workshops. Rotate your developers through conferences, so that everyone has the opportunity to go. Managers who understand what is involved in developing software. Developing software is not the same thing as digging a ditch or laying bricks. A developer will not spend 8/8 hours writing code. Plenty of time will be spent on research, whether requirements gathering/clarification, or on the right approach to solve a particular problem. In physical engineering, prototypes and stepwise refinement are part of the iterative development of a product. The same is true in software. Just because the final check-in for a task is only a few text files doesn't mean that the developer didn't spend a lot of effort refining that feature or bug fix. Guidance. As recent college grads, your new developers are going to need someone who's been around to guide them to the correct technologies and practices to use to increase their value, both for the company and for themselves."} +{"doc_id": 426237, "author": null, "text": "Casual dress will have to be up on the list for me. I used to work for an employer who would on occasion stock our department mini-fridge with caffeine (in our scenario it was Mountain Dew). The most important thing to me was chemistry: having coworkers that were intelligent enough to bounce ideas off of, but social enough that we could invite each other to barbecues. Finally, I think being comfortable. I think the casual dress is a small preface to this; however: good chairs, good screens, performing machines, the lowest-stress conditions possible. Being a developer, deadlines are already enough to stress out about."} +{"doc_id": 426240, "author": "Ramy", "text": "In a few words, I'd say room for growth. I'm not the great hacker that most of the people on this site probably are (at least the ones with over 10k rep - I have yet to successfully answer a question after being here for a year). So for me, when I was starting out, I knew that college was, effectively, nothing. And I needed to do all my learning in the real world. Sure, I got my CS degree, but I was way behind all the other CS majors because I didn't play with computers when I was young. I had hardly ANY experience with FILE SYSTEMS, for God's sake, before I got to college. So, how do you create room for growth? To me, I think you have to create a professional, fun, AND academic environment. Professional meaning the usual things you read about in a business-type book (respect, clear expectations, blah blah). Fun meaning, aside from games, putting together a group of people who can sit in a room together and just shoot the breeze and be mostly laughing. Academic meaning an environment where everyone is a student and everyone is a teacher. This is probably the most difficult to foster, in my opinion, for any number of reasons."} +{"doc_id": 426235, "author": null, "text": "I just entered the job market and landed with a company where the hours (with the exception of occasional deadlines) are 9-5, with 3 weeks vacation to start, and free lunch Monday-Thursday from different restaurants. This beat the other places, which essentially said they would treat me like dirt and have me work long hours. The hours and benefits allow me to maintain a very healthy work/life balance, and this makes me more productive at work. Oh yeah, and dual monitors rock."} +{"doc_id": 491773, "author": "Miguel van de Laar", "text": "SOAP can be used without WSDL, but such services will not be found using the discovery mechanics offered by WSDL. WSDL could be used to describe any form of XML exchange between two nodes, so other formats of XML can be used. REST services can be described using WSDL version 2.0. An example of how to describe REST services in WSDL 2.0 can be found here. See also this question on StackOverflow."} +{"doc_id": 164117, "author": "wbogacz", "text": "Google is making it easier, as reported today, to change the country to a different one.
From Android Police on 2018/03/20: Now, Google appears to be taking one teeny tiny step toward making the switch easier for some people. If your Play Store country is set to country A (whether you lived there or, uhm, fooled Google into thinking you did because you made a purchase with a valid payment method and a valid shipping address in that country), and the Play Store detects that you've moved to country B (likely based on your IP address), you'll get a new option in the Account settings: Country and profiles. There you'll see which country your Store account is on and an option to quickly switch to the new country - provided you have a valid payment method there. There are some caveats to this: keep in mind that this Country and profiles option won't be there for everyone and won't let you switch to any country you want. It appears to only show up when it detects a discrepancy between your setting and your IP address."} +{"doc_id": 33053, "author": "James Fernandes", "text": "You can always try the Matrox DualHead2Go Digital ME. I use the Mini DisplayPort version and it works well, except it can get quite fussy when I try to resume from sleep. Of course, I'm using it with the MBP lid closed; it doesn't seem to have this problem when the lid is open. One other note: the device makes both monitors appear to the Mac as a single, giant monitor. However, I haven't really had any issue with this myself."} +{"doc_id": 196895, "author": "Dan", "text": "For all intents and purposes, CentOS is RedHat. CentOS, however, is more flexible in what they can do. From the CentOS website: CentOS is developed by a small but growing team of core developers. In turn, the core developers are supported by an active user community including system administrators, network administrators, enterprise users, managers, core Linux contributors and Linux enthusiasts from around the world. CentOS has numerous advantages over some of the other clone projects, including: an active and growing user community; quickly rebuilt, tested, and QAed errata packages; an extensive mirror network; developers who are contactable and responsive; multiple free support avenues including IRC chat, mailing lists, forums; and a dynamic FAQ. In a nutshell, CentOS is a community version of RedHat. You use Ubuntu, so you may understand this analogy a bit better: CentOS is to RedHat as Linux Mint is to Ubuntu."} +{"doc_id": 393511, "author": "blucz", "text": "If a developer showed up at an interview with a bunch of certifications on his resume, I would develop serious doubts as to his value system. There are zero situations in which I'd value time spent obtaining a certification over time spent working on a project. Any project."} +{"doc_id": 557356, "author": "Lighty", "text": "Long answer: According to Microsoft, Winmail.dat is a file that keeps the rich-text formatting readable if sent by Outlook. I have sniffed around a bit and found out that attachments that act the way Winmail.dat does exist. Then again, since it's an attachment (and I hope you don't auto-open attachments from emails), you should only open it if you trust the person that sent it to you, or have a good anti-virus that can scan your mail too. Short answer: yes, attachment malware called winmail.dat exists; exploitation of a valid file is possible, because it basically contains theming.
More info on winmail.dat can be found on this website: http://support.microsoft.com/kb/278061"} +{"doc_id": 196908, "author": "pradeepchhetri", "text": "You can get this working by using: TEST=foo && echo $TEST"} +{"doc_id": 304, "author": "g .", "text": "\u2318+\u21e7+N: Create a new folder in Finder"} +{"doc_id": 196913, "author": "gertvdijk", "text": "Use netstat. For example $ netstat -nputw Active Internet connections (w/o servers) Proto Recv-Q Send-Q Local Address Foreign Address State PID/Program name [...] tcp 0 0 192.168.25.222:22 192.168.0.134:42903 ESTABLISHED 32663/sshd: gert [p lists all UDP (u), TCP (t) and RAW (w) outgoing connections (not using l or a) in a numeric form (n, prevents possible long-running DNS queries) and includes the program (p) associated with that. Consider adding the c option to get output being updated continuously."} +{"doc_id": 305, "author": "Tobias Cohen", "text": "\u2318+\u21e5: Cycle through running apps \u2318+`: Cycle through windows for current app \u2318+W: Close current window \u2318+Q: Quit current app \u2318+,: Preferences dialog for current app \u2318+H: Hide current app (as long as its not Photoshop) ctrl+\u21e7+\u23cf: Sleep all displays"} +{"doc_id": 196915, "author": "Kratos", "text": "netstat is a good option.Use the parameters as required.(see its man pages) For example netstat -antup Here it can monitor all(a) listening numeric (n) tcp (t) and udp (u) process (p). You can also try the ss command . For reference use : SS Linux TCP / UDP Network and Socket Information"} +{"doc_id": 196917, "author": "St\u00e9phane Chazelas", "text": "If you just want to just log every connection attempt, the easiest is probably iptables LOG target on Linux (or the equivalent firewall logging feature on your system). If you need more information like duration of the connection and amount of data exchanged in both directions, then conntrackd (on Linux) is probably the best option. However note that those two above only log the traffic that goes through netfilter, which is generally all the traffic but doesnt account traffic generated with IP stacks in user space (like virtual machines or anything using raw sockets) or bridged traffic. For more general solutions, you can have a look at things like argus, bro-ids, sancp or ntop that log all sorts of information based on traffic they sniff on an interface."} +{"doc_id": 65847, "author": "Rampant", "text": "Real VNC or VNC Viewer are the same client (aside from platform) as used on Windows PCs for decades. OS X is designed to work with VNC protocol out of the box. As an added bonus, Real VNC is available as a free iOS app. The only real concerns you should have in using it are security hardening, ie restricting access to specific IP addresses or users."} +{"doc_id": 360768, "author": "Marinos An", "text": "This is what works for me: 7z a zipped.zip ./rootDir/* It will create a zip archive with root: any files/directories inside rootDir. e.g zipped.zip: file1.txt otherdir/ file2.txt Hidden files: As correctly pointed on @Shiva Wus comment the above does not include hidden files. This is also the case for the rest of the answers (at least the ones which preserve directory structure). One solution is to explicitly add the hidden paths wildcard. # The command below will include all files and directories starting with a dot. 
7z a zipped.zip ./rootDir/* ./rootDir/.[!.]* OR # Same as above in one argument 7z a zipped.zip ./rootDir/{*,.[!.]*} result: zipped.zip: file1.txt .hiddenfile1.txt .hiddendir/ file3.txt otherdir/ file2.txt"} +{"doc_id": 98625, "author": "webchun", "text": "First try to restart your mac, then run the Disk Utility app. It will re-calculate your HD available space and show the correct numbers on Finder immediately"} +{"doc_id": 164161, "author": "Louis Maddox", "text": "Just joined this StackExchange to show some appreciation for Jeremys answer above, and also add the few lines of JS I used to export the tabs list (since copying with the cursor isnt ideal!) As Jeremy said, select Remote devices under More tools on Chrome devtools \u22ee icon (top right of the panel): set up USB debugging on your phone (under Settings\u21d2Developer options, root not required) note that you must enable the Developer options menu, its hidden by default to protect users on my phone this required tapping multiple times on the build number under Settings\u21d2About Device once this is done, plug your USB in and allow MTP connection when the Chrome devtools remote device panel is open, the phone will then request to allow USB debugging you can opt to always trust the computer Now the device is connected, open a 2nd devtools view on the devtools view from which you selected Remote devices to be able to retrieve the list of tabs using JavaScript note that you must have devtools in pop-out mode (use the vertical ellipsis symbol in the top right of the panel) to be able to get this up, otherwise Command+Option+J(MAC) Ctrl+Shift+J(WINDOWS) will just close the first devtools panel. expand the list from the first few items to all tabs by clicking Show more to script against the list, use the following few lines of code [entered in the console of the 2nd devtools window] To export a list of all URLs open on Chrome for Android, I chose to just make the list into a markdown formatted text string and copy it to the clipboard let bookmarkList = Array.from(document.querySelectorAll(.widget>.vbox)) .map(e => e.shadowRoot) .map(e => e && e.querySelector(.device-page-list)) .find(e => e); let bookmarks = Array.from(bookmarkList.querySelectorAll(.vbox)) .map(e => `[${e.querySelector(.device-page-title).innerHTML}](${e.querySelector(x-link).innerHTML})`); copy(bookmarks.join(\\n)); You will then have a list on your clipboard looking like this: [How can I export the list of open Chrome tabs? - Android Enthusiasts Stack Exchange](https://android.stackexchange.com/questions/56635/how-can-i-export-the-list-of-open-chrome-tabs) [Get Started with Remote Debugging Android Devices | Tools for Web Developers | Google Developers](https://developers.google.com/web/tools/chrome-devtools/remote-debugging/) [How To Enable USB Debugging On Your Android Phone](https://www.groovypost.com/howto/mobile/how-to-enable-usb-debugging-android-phone/) [Configure On-Device Developer Options | Android Studio](https://developer.android.com/studio/debug/dev-options.html) ..."} +{"doc_id": 98634, "author": "Pokermike", "text": "My original solution (crossed out below) no longer works. 
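The 7z recipe above leans on shell globs, which is exactly where hidden files get missed. As a rough stdlib sketch (not part of the answer above), the same hidden-file-safe archive can be built in Python, since os.walk lists dotfiles like any other name:

```python
import os
import zipfile

def zip_dir(root: str, out: str) -> None:
    # os.walk sees .hiddenfile1.txt and .hiddendir/ without extra wildcards.
    with zipfile.ZipFile(out, "w", compression=zipfile.ZIP_DEFLATED) as zf:
        for dirpath, _dirnames, filenames in os.walk(root):
            for name in filenames:
                full = os.path.join(dirpath, name)
                zf.write(full, os.path.relpath(full, root))

zip_dir("rootDir", "zipped.zip")  # rootDir is the example directory from the answer
```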
Things that do work: If you have a Mac keyboard, press the Help key to toggle between insert and replace mode If you have a Windows keyboard, press the Insert (or Ins) key to toggle between insert and replace mode Restart Slack (quit and relaunch); refreshing no longer works I contacted Slack support and was told to use \u2318+R to do a quick refresh and reset the mode from overwrite to insert. From Slack support team: > Thanks for writing in about this! Weve seen this occur on Mac computers recently as well, and the good news is there is an easy way to switch! To reset the mode from overwrite to insert, this should be possible with a quick refresh using Cmd+R Explanation: This is because slack is using electron, which is essentially a wrapper around a browser to create a desktop app. Other web commands work like \u2318+[ and \u2318+] to traverse your browser history backward and forward, respectively."} +{"doc_id": 65872, "author": "eskatos", "text": "The best solution Ive found today (2017) is to use Homebrew and Cask to install osxfuse and sshfs: brew install --cask osxfuse brew install sshfs And then: sshfs username@hostname:/remote/directory/path /local/mount/point -ovolname=NAME It works! :-)"} +{"doc_id": 65875, "author": "SamWN", "text": "Unless youre using backtrack/kali for a specific task: NO. Treat the super user as you would a loaded gun: if you have an immediate need and intention to use it: OK. If you can solve your problem in any other manner, however (e.g. sudo), do that."} +{"doc_id": 33111, "author": "Steve", "text": "Take a look at the free and simple to use yEd, I believe this would fit about right for your 2 yr old question :)"} +{"doc_id": 98647, "author": "Willempie", "text": "For some reason the disk my Mac was booting from was the macOS Installer Disk, all I had to do: Hold down the option (-alt) key before the Apple logo shows. Then just select your Macintosh HD (or how you named the volume when you installed macOS). This weird boot thing happened after I ran an update on High Sierra. If this doesnt resolve the issue, you could try the chosen answer. It could be that the problems on your Mac are worse. Then I would suggest to backup all your data in safe mode (or from a terminal in recovery mode) and do a clean install of macOS from a USB stick or from internet recovery."} +{"doc_id": 196959, "author": "jlliagre", "text": "If you run Solaris or a derivative, have a look at conntrack"} +{"doc_id": 33125, "author": "graup", "text": "For PDFs: At least on Mac OS X, the Adobe Reader application (version 10+) has this feature in its print dialog. Theres an option to print as poster, meaning 100% scale across multiple pages."} +{"doc_id": 357, "author": "Davide Gualano", "text": "Power keys: Ctrl+\u23cf: Are you sure you want to shut down your computer dialog message appears: Ctrl+\u2318+\u23cf: restart the computer Ctrl+\u2325+\u2318-\u23cf: shut down the computer \u2318+\u2325+\u23cf: puts the computer in sleep mode \u21e7+Ctrl+\u23cf: puts the monitor in sleep mode"} +{"doc_id": 366, "author": "Fishtoaster", "text": "Open a Find Window (cmd-f) or do a spotlight search and select show all Hit the little + icon (to the right of the save button) Select File Type as a search criteria and select the correct one. Rerun the search. 
(Disclaimer, Im not on a Mac right now :( so my exact instructions might be a little off)."} +{"doc_id": 367, "author": "Am1rr3zA", "text": "Search by Kind One of the most useful ways to narrow down a search is by using the kind: keyword. This allows you to restrict your list of results to a certain file format. For instance, if you type time machine kind:pdf, Spotlight will pull up only PDF files containing the words time and machine. You can also limit your search to e-mail messages, music files, System Preferences, applications, and more. While the original Spotlight recognized only a limited number of file types, the Leopard version can look for files created by specific applications, as well as certain file formats. Searching for kind:mp3 or kind:tiff will find files in one of those formats, and searching for kind:pages or kind:powerpoint will show only documents created in one of those programs. For a list of useful keywords, see \u201cMy Kind of Keyword. But remember, for the keywords to work, you must have the appropriate categories enabled in Spotlight\u2019s preferences. you can get more info here or here."} +{"doc_id": 295278, "author": "eli", "text": "For bash, basically: instead of using cd one can use pushd to change directories. With practical usage: the history of visited directories is saved (correctly: stacked) and one can switch between them: pushd /home; pushd /var; pushd log To see the stack use dirs and for easier navigation (to get the numbers of the stack-entries use: dirs -v Output: me@myhost:/home$ dirs -v 0 /home 1 /var 2 /tmp Now utilize these numbers with cd and ~ like: cd ~1 But these numbers are rearranged now and position 0 will change, so just pushd the directory to the top position twice (or use a dummy on position 0) like: me@myhost:/home$ dirs -v 0 /home 1 /home 2 /var 3 /tmp Now 1..3 will keep their position (To release the current directory from the stack/deleting it from history, use popd)"} +{"doc_id": 196982, "author": "txtechhelp", "text": "tcpdump allows you to see all IP traffic flowing to/from a specific interface with the ability to filter based on certain criteria. tcpdump is typically installed on most *nix systems by default, if not theres usually a port somewhere to grab it for your specific distro."} +{"doc_id": 65916, "author": "Mike Ciaraldi", "text": "Back around 1990 I was working on a project with a guy named Tom. We were using a SUN server running SunOS (a Unix derivative, predecessor to Solaris). This was back in the days before CD drives and flash drives, so if you messed up the OS on your hard drive there was no way to recover. Tom used to routinely log in as root. I told him that was a bad idea, but he did not listen. One day I heard him say Uh-oh. He had meant to type something like this: mv something* . Unfortunately he left off the final dot, so the shell expanded all the file and directory names which matched this pattern. Then the mv command used whatever ended up as the final name in the list as the destination directory, and moved everything else into it. Also unfortunately, he was currently at the root directory, so basically the entire file system got moved into one of its subdirectories. I used up-arrow to bring back the previous command and saw what had happened. The first thing I then said was, Dont log off! Or you will never be able to log in again. No problem, right? We could just move everything back. We could, except that the mv command was not one of the built-in commands of the shell. 
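For the connection-monitoring answers above (netstat, ss, tcpdump), here is a hedged cross-platform sketch using the third-party psutil package (an assumption of mine, not something those answers mention); listing other users' sockets may require elevated privileges:

```python
import psutil  # assumption: pip install psutil

# Roughly what `netstat -nputw` shows: established connections plus the owning program.
for conn in psutil.net_connections(kind="inet"):
    if conn.raddr and conn.status == psutil.CONN_ESTABLISHED:
        name = psutil.Process(conn.pid).name() if conn.pid else "?"
        print(f"{conn.laddr.ip}:{conn.laddr.port} -> {conn.raddr.ip}:{conn.raddr.port}  {name}")
```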
Instead, it was an executable program, stored in one of the files which had been moved. Luckily, ls was a built-in command, so after using ls to confirm where the commands had moved to, I was able to find the mv command, invoke it with its full path name, and put things back where they were supposed to be. And then I told him, Tom, this is why it is a bad idea to routinely log in as root."} +{"doc_id": 33149, "author": "Vaibhav Bajpai", "text": "I use Alfred \u2192 The universal hotkey allows me to activate it using a keyboard."} +{"doc_id": 131458, "author": "pesche", "text": "ShootMe does work for me on the Galaxy Tab (2.2), even if I did not root it and the ShootMe page says, that it works only for rooted devices."} +{"doc_id": 33158, "author": "Galas", "text": "You can run sudo hostname Name-Of-My-Computer in Terminal to change the name."} +{"doc_id": 98701, "author": "Ariel Allon", "text": "Id recommend checking if fsck is holding your disk hostage upon connection. This was it for me. Running ps aux | grep fsck revealed: root 2587 7.3 0.1 4363584 21780 ?? U 10:56PM 2:22.54 /System/Library/Filesystems/exfat.fs/Contents/Resources/./fsck_exfat -y /dev/rdisk2s1 So the solution was a sudo kill -9 2587 (insert your PID instead of 2587) (or sudo pkill -f fsck). As soon as that ran, my Volume immediately mounted. Oh the irony of fsck being the problem..."} +{"doc_id": 65936, "author": "Theodore Murdock", "text": "According to an answer on a similar question, its possible to use a program called Karabiner to detect the Logitec Windows-specific keyboard events sent by the M560, and translate them to regular keyboard or mouse button events that tools such as BetterTouchTool can pick up on. Please see the original answer on the duplicate question for full details, including a Karabiner configuration file for the M560."} +{"doc_id": 65941, "author": "nohillside", "text": "GNU grep is not part of coreutils. To install, run brew install grep As with coreutils, this doesnt automatically replace the existing grep ==> Caveats All commands have been installed with the prefix g. If you need to use these commands with their normal names, you can add a gnubin directory to your PATH from your bashrc like: PATH=/usr/local/opt/grep/libexec/gnubin:$PATH So after installing you can either use ggrep, gegrep and gfgrep; or extend PATH as shown above to use grep etc. The second option may confuse some macOS specific scripts though in case the options differ."} +{"doc_id": 408, "author": "Nippysaurus", "text": "All of the shortcuts are my favorite! Specifically (based on frequency of use) \u2318+H: Hide application"} +{"doc_id": 65946, "author": "Ajax", "text": "option + command + i is the winner"} +{"doc_id": 415, "author": "Artem Pakk", "text": "Starting from iOS4, while listening to music in iPod app the screen turns on for a second to show what song is currently playing. It wont happen if the iPhone is in your pocket."} +{"doc_id": 418, "author": "Martin Marconcini", "text": "You should take a look at mount\u2019s help: man mount Upon closer inspection you\u2019ll see that the filesystem\u2019s type is: mount -t smbfs //username:password@MACHINENAME/SHARENAME /SomeLocalFolderOfChoice Password (and theoretically username) are optional. The result of the above command will be no output (if all went ok), but a cd /SomeLocalFolderOfChoice, should produce the remote results. Please note that SomeLocalFolderofChoice must exist. 
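The fsck answer above runs ps aux | grep fsck and then kill -9 by hand; this is a sketch of the same procedure with psutil (third-party, an assumption), and killing fsck is exactly as risky from Python as from the shell:

```python
import psutil  # assumption: pip install psutil; run with sudo for a root-owned fsck

for proc in psutil.process_iter(["pid", "name", "cmdline"]):
    if proc.info["name"] and "fsck" in proc.info["name"]:
        print("killing", proc.info["pid"], " ".join(proc.info["cmdline"] or []))
        proc.kill()  # sends SIGKILL, the programmatic kill -9
```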
You can also use mount_smbfs to replace the mount -t smbfs."} +{"doc_id": 419, "author": "Studer", "text": "You could easily achieve this using mount_smbfs (which is, actually, a wrapper for mount -t smbfs) : mount_smbfs //user@SERVER/folder ./mntpoint Optionally, add the workgroup : mount_smbfs -W workgroup //user@SERVER/folder ./mntpoint You could, of course, change the ./mntpoint (for something like /Volumes/smb). After doing this, simply go to ./mntpoint to browse your data. To unmount, using the following command : umount ./mntpoint"} +{"doc_id": 131498, "author": null, "text": "You can use Javascripts alert method to return the source code, type this in the address bar: javascript: alert(document.getElementsByTagName(html)[0].innerHTML);"} +{"doc_id": 262574, "author": "Evgeny Vereshchagin", "text": "systemctl restart systemd-journald You can lose your logs: see https://github.com/systemd/systemd/issues/2236 mkdir /var/log/journal There is a change in v208: systemd-journald will no longer adjust the group of journal files it creates to the systemd-journal group. Instead we rely on the journal directory to be owned by the systemd-journal group, and its setgid bit set, so that the kernel file system layer will automatically enforce that journal files inherit this group assignment. A tmpfiles.d(5) snippet included in systemd will make sure the setgid bit and group are properly set on the journal directory if it exists on every boot. So, you should run something like systemd-tmpfiles --create --prefix /var/log/journal after mkdir /var/log/journal See also: Bug: users from the group systemd-journal couldnt read some journal files"} +{"doc_id": 65966, "author": "Gordon Davisson", "text": "First: the name rootless is misleading, since theres still a root account, and you can still access it (the official name, System Integrity Protection, is more accurate). What it really does is limit the power of the root account, so that even if you become root, you dont have full control over the system. Essentially, the idea is that its too easy for malware to get root access (e.g. by presenting an auth dialog to the user, which will cause the user to reflexively enter the admin password). SIP adds another layer of protection, which malware cant penetrate even if it gets root. The bad part of this, of course, it that it must also apply to things youre doing intentionally. But the restrictions it places on root arent that bad; they dont prevent most normal system customization. Heres what it restricts, even from root: You cant modify anything in /System, /bin, /sbin, or /usr (except /usr/local); or any of the built-in apps and utilities. Only Installer and software update can modify these areas, and even they only do it when installing Apple-signed packages. But since normal OS X-style customizations go in /Library (or ~/Library, or /Applications), and unix-style customizations (e.g. Homebrew) go in /usr/local (or sometimes /etc or /opt), this shouldnt be a big deal. It also prevents block-level writes to the startup disk, so you cant bypass it that way. The full list of restricted directories (and exceptions like /usr/local and a few others) is in /System/Library/Sandbox/rootless.conf. Of course, this file is itself in a restricted area. When you upgrade to El Capitan, it moves any unauthorized files from restricted areas to /Library/SystemMigration/History/Migration-(some UUID)/QuarantineRoot/. You cant attach to system processes (e.g. 
those running from those system locations) for things like debugging (or changing what dynamic libraries they load, or some other things). Again, not too much of a big deal; developers can still debug their own programs. This does block some significant things like injecting code into the built-in Apple apps (notably the Finder). It also means that dtrace-based tools for system monitoring (e.g. opensnoop) will not be able to monitor & report on many system processes. You cant load kernel extensions (kexts) unless theyre properly signed (i.e. by Apple or an Apple-approved developer). Note that this replaces the old system for enforcing kext signing (and the old ways of bypassing it). But since v10.10.4 Apple has had a way to enable trim support for third-party SSDs, the #1 reason to use unsigned kexts has gone away. Starting in Sierra (10.12), some launchd configuration settings cannot be changed (for example, some launch daemons cannot be unloaded). Starting in Mojave (10.14), access to users personal information (email, contacts, etc) is restricted to apps that the user has approved to access that info. This is generally considered a separate feature (called Personal Information Protection, or TCC), but its based on SIP and disabling SIP disables it as well. See: What and how does macOS Mojave implement to restrict applications access to personal data? Starting in Catalina (10.15), protection of most system files is strengthened by storing them on a separate read-only volume. This is not strictly part of SIP, and is not disabled by disabling SIP. See: WWDC presentation on Whats New in Apple [Catalina] File Systems and Whats /System/Volumes/Data?. Starting in Big Sur (11.x), the read-only system volume is now a Sealed System Volume (a mounted snapshot rather than a regular volume), so making changes to it is even more complicated. See: the Eclectic Light Company article Big Sur boot volume layout. If you dont want these restrictions -- either because you want to modify your system beyond what this allows, or because youre developing & debugging something like kexts that arent practical under these restrictions, you can turn SIP off. Currently this requires rebooting into recovery mode and running the command csrutil disable (and you can similarly re-enable it with csrutil enable). Modifying the system volume in Catalina requires disabling SIP, then mounting the volume with write access (and then rebooting and turning SIP back on is recommended). In Big Sur, additional steps are required to disable authentication of the system volume before changes, and afterward create a new snapshot. You can also selectively disable parts of SIP. For example, csrutil enable --without kext will disable SIPs kernel extension restriction, but leave its other protections in place. But please stop and think before disabling SIP, even temporarily or partially: do you really need to disable it, or is there a better (SIP-compliant) way to do what you want? Do you really need to modify something in /System/Library or /bin or whatever, or could it go in a better place like /Library or /usr/local/bin etc? SIP may feel constraining if you arent used to it, and there are some legitimate reasons to disable it, but a lot of what it enforces it really just best practice anyway. To underscore the importance of leaving as much of SIP enabled as much of the time as possible, consider the events of September 23, 2019. Google released an update to Chrome that tried to replace the symbolic link from /var to /private/var. 
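Before rebooting into recovery, the current SIP state can be read from a normal macOS session; a minimal sketch around the csrutil command discussed above:

```python
import subprocess

# csrutil status is read-only and works outside recovery mode; only
# enabling/disabling SIP requires the recovery environment.
status = subprocess.run(["csrutil", "status"], capture_output=True, text=True).stdout
print(status.strip())  # e.g. "System Integrity Protection status: enabled."
```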
On most systems, SIP blocked this and there were no bad effects. On systems with SIP disabled, it rendered macOS broken and unbootable. The most common reason for disabling SIP was to load unapproved (/improperly signed) kernel extensions (specifically video drivers); if theyd only disabled the kext restriction, they would not have been affected. See the official Google support thread, a superuser Q&A on it, and an Ars Technica article. References and further info: WWDC presentation on Security and Your Apps, a good explanation by Eldad Eilam on quora.com, the Ars Technica review of El Capitan, and an Apple support article on SIP, and a deep dive by Rich Trouton (who also posted an answer to this question)."} +{"doc_id": 131504, "author": "Lie Ryan", "text": "This depends on the screen technology. For instance, (it was said that) Android 2.3 Gingerbread has dark theme since Googles latest flagship device Nexus S uses (Super) AMOLED display which consumes less energy when displaying dark color since AMOLED produces its own light and darker color emits less photons. Contrasts with LCD display which uses a backlight (a fixed number of photons) and the LCD crystals filters those colors it needs. The crystals on an LCD displays though, actually consume slightly less power when displaying white since it takes more power to strain the crystal to block more light. Screen color does not affect the backlighting. There are certain display technologies in TVs that tries to give dynamic backlighting by dimming the screen when displaying darker image. Im not aware of any device that actually ships with that type of screen though."} +{"doc_id": 131505, "author": "Matt", "text": "On OLED, AMOLED and Super AMOLED screens: Yes LCD screens: No"} +{"doc_id": 33210, "author": "user41486", "text": "Try this one. I used it to find lost hidden account. dscl . list /Users | grep -v ^_"} +{"doc_id": 65979, "author": "Mazzieh71", "text": "You can delete them through FileZilla or similar files manager."} +{"doc_id": 33212, "author": "nohillside", "text": "OmniGroup offers a free utility called OmniDisksweeper which lists files/folders sorted by size. OmniDiskSweeper is really great at what it does: showing you the files on your drive, in descending order by size, and letting you delete them easily! It scans your disks as quickly as possible and shows you the facts \u2014 if a file doesnt make the cut to stay, just click the big Delete button and be done with it. Its a fast, easy way to find those large files cluttering up your drive and clearing them out for new, better things. Make sure you want them gone, though. Theres no going back"} +{"doc_id": 131521, "author": "Keith Twombley", "text": "Look for an app called View Web Source in the market and install it. Then when browsing, open your menu and tap share, in the list that pops up choose View Web Source."} +{"doc_id": 65998, "author": "Zweedeend", "text": "In the Mail app, go to Window > Activity and try to cancel everything it is doing. That solved it for me"} +{"doc_id": 98778, "author": "Milkmannetje", "text": "Allright, just confirmed that this product is not fully compatible with a MacBook. The dock doesnt communicate powerinfo with the Macbook so doesnt supply it with enough power. (Charging takes more than 10 hours.) Other peripherals do work. 
USB: Works Ethernet: Works Display: VGA/Displayport/HDMI Works"} +{"doc_id": 33245, "author": "P.K.", "text": "Assuming that each picture you shoot is 1 MB, 20000 pictures will take up 20000 MB which is approximately 20 GB. Now if you put these on your iPad 2, you have around 40 GB to work with keeping the reserved space in mind, which is okay. Looking at that information, you can upload 40000 more if you need. An alternative is to upload them all on a photo-hosting website to save all the 20 GB if you dont look at them often. Responding to your second query, yes: it is quite responsive. My uncle has 8436 pictures and 129 videos on his iPad 2 and it is working perfectly."} +{"doc_id": 66015, "author": "JD Leonard", "text": "Take screenshot. Paste screenshot into Evernote. Copy screenshot from Evernote. Paste screenshot where intended."} +{"doc_id": 131553, "author": "Steve Mould", "text": "Hey, I just ran a load of tests to figure this out. They are vaguely scientific tests! I figured out that I was saving 20% battery by switching to dark wallpaper and dark themed apps. Have a look through my write up, you should be able to work out the saving on your phone: http://blog.stevemould.com/phone-battery-save-black-wallpaper/"} +{"doc_id": 360932, "author": "Vishal Sahu", "text": "I generally use `command` to place its output as argument to another command. For e.g., to find the resources consumed by process foo on freebsd, the command will be: procstat -r `pgrep -x foo` Here, pgrep is used to extract the PID of process foo which is passed to procstat command which expects PID of process as argument."} +{"doc_id": 393708, "author": "Stephen C", "text": "The cardinal rules are: Follow the projects existing coding standard. If there is no coding standard and you are editing an existing code-base owned by someone else - be consistent with the style of the existing code, no matter how much you like / dislike it. If you are working on a green-field project - discuss with other team members, and come to a consensus on a formal or informal coding standard. If you are working on a green-field project as the sole developer - make up your own mind, and then be ruthlessly consistent. Even if you have no external constraints on you, it is (IMO) best to look for an existing (widely used) coding standard or style guideline, and try and follow that. If you roll your own style, theres a good chance that you will come to regret it in a few years. Finally, a style that is implemented / implementable using existing style checkers and code formatters is better than one that needs to be enforced manually."} +{"doc_id": 492012, "author": "Hexodus", "text": "The PHP-Documentation clearly says that you can use short echo tags safely: 5.4.0 The tag <?= is always available regardless of the short_open_tag ini setting. Keyboard > Input Sources. Add a keyboard picture from the KB page too, if that would help. That will make it easier for future Googlers. Further info: You can change the keys in System Prefs > Keyboard > Shortcuts > Keyboard... though it doesnt list the reverse direction, it does still work when you add shift to that new combo. I tested by moving mine from ` (and ~ ) to \u00a7 (and \u00b1 ) You can use the alternative of Ctrl \u2303 F4 [visible in the prefs window above] but that almost indiscriminately marches through every single open window on all Spaces, without switching to the correct Space each time. 
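The backtick answer above (procstat -r `pgrep -x foo`) translates naturally to Python, where command substitution becomes capturing one command's output and splicing it into another's argument list; a sketch assuming the FreeBSD tools that answer names:

```python
import subprocess

# Capture pgrep's output (the PID of foo), then pass it to procstat.
pid = subprocess.run(["pgrep", "-x", "foo"], capture_output=True, text=True).stdout.strip()
if pid:
    subprocess.run(["procstat", "-r", pid])
```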
Its really not too useful unless you use a single Space, just included here for completeness."} +{"doc_id": 66126, "author": "Matthieu Riegler", "text": "Command+` is the way to go on OS X to change between windows of the same application."} +{"doc_id": 164431, "author": "Jonathan", "text": "How to Speed up Slow Android Remember the day you bought your android? It wasnt slow. It was so fast. What happened? Did the silicon get slower? Did the bits get clogged? Of course not. Youre just running more invisible background services. First, disable auto updates in Samsung galaxy store (if applicable), in settings > software updates, In secret developer options menu (look it up) and Google play store This is important because otherwise they just reinstall / update themselves. Then look for apps that open automatically after removing them from via device maintenance > clear memory & developer options > running services and uninstall or reset them to factory default to as many as you can (I also remove permissions from them so they cant read from storage secretly in the background etc.). Some system apps you may want to keep, such as Google play services etc. Just uninstall the ones you are confident you understand are not needed Remember, clearing the memory does NOT speed up the device, it only shows you what apps autoload background processes so you can uninstall them. A good example of this is Instagram, if you notice it keeps running while its closed, because it will preload dozens of Instagram videos in your feed before you even open the app. While preloading is generally seen as an optimization, if you have enough apps aggressively preloading at the same time, and security apps monitoring those apps, and RAM filling up which then kicks off garbage collection on the Java runtime, and possibly even memory paging, plus any activity you actually request, plus reporting your location to Google, plus reporting your location to find your phone programs, plus checking traffic on your future commute, plus parsing your calendar for AI usage with Google Home, plus updates checking, plus notification querying, etc etc, then you may find that your device is doing far more activity than it can handle. Simple way to test this is to turn on Max Battery Saving option and see if the battery and responsiveness improves. If so, its in large part because all the extensive background processing is disallowed. Also remember that disabling auto updates does not speed up the phone per se, it just allows you prevent future updates from causing unexpected performance degradation. It makes you the gatekeeper so your phone cannot magically slow down overtime, but rather lets you pick and choose what apps you trust to update (and of course, update the apps you love.) Its like Kon Mari method for your phone. Remember, phones dont get slow over time. Bad programming in Updates cause phones to get slow. And this will get you back to a smooth android like the day you bought it (also good for battery) Ive had outstanding success with this. And my frame rate is 2x better as well as app open speed. Its fantastic. Oh and also enable background ANRs so you can see invisible stalls"} +{"doc_id": 197203, "author": "Gert van den Berg", "text": "Another way would be to use find and xargs: (this might include a . directory in the zip, but it should still extract correctly. With my test, zip stripped the dot before compression) find . 
-type f -exec zip zipfile.zip {} + (The + can be replaced with \\; if your version of find does not support the + end for exec. It will be slower though...) This will by default include all sub-directories. On GNU find -maxdepth can prevent that."} +{"doc_id": 33391, "author": "Christian Long", "text": "This article from Lifehacker.com.au suggests setting the Dock autohide delay to 1000 seconds, like so: defaults write com.apple.dock autohide-delay -float 1000; killall Dock To restore the default behavior: defaults delete com.apple.dock autohide-delay; killall Dock The author says he sets the delay to two seconds, so he can still get to the Dock in those rare cases when its needed."} +{"doc_id": 637, "author": "Kyle Cronin", "text": "Your ringer volume adjusts the volume of the sound coming out of the phone when its ringing. If the phone is not playing media (i.e. no sound is coming out of it) then the volume buttons on the side adjust the ringer volume. If you are playing media (music, video, etc) out the internal speakers, then the buttons on the side adjust the volume of that media coming out those speakers. Finally, when you have headphones plugged in, the iPhone knows this and has a separate volume for headphones. Using the volume buttons will adjust the headphones volume when headphones are plugged in. The reason why these are separate is because sometimes you want to mute your ringer, but still want to listen to media (or vice versa), and because the headphone and internal speaker volumes are different and need to be adjusted separately."} +{"doc_id": 164478, "author": "GorvGoyl", "text": "Ill share 4 methods to play youtube in background and with the screen lock. Install Brave browser, enable media play background option in site settings, and visit https://m.youtube.com use Youtube vanced app https://youtubevanced.com/ use YMusic app use NewPipe app"} +{"doc_id": 230024, "author": "jgr", "text": "Identify stream numbers: $ ffmpeg -i in.mp4 ... Stream #0:0: Video: ... Stream #0:1: Audio: ... Stream #0:2: Audio: ... Use -map _file_:_stream_ to select which streams to process and output $ ffmpeg -i in.mp4 -map 0:0 -map 0:2 -vcodec copy -acodec copy out.mp4 see: https://ffmpeg.org/ffmpeg.html#Advanced-options"} +{"doc_id": 33425, "author": "xander", "text": "I have found the answers to this question helpful for this as well. For an easy way to invoke App Expose to get at minimized windows for current app use Cmd-Down; Down; Enter."} +{"doc_id": 262804, "author": "chaos", "text": "Systemd is backward compatible with SysV init scripts. According to LSB 3.1, the init script must have informational Comment Conventions, defining when the script has to start/stop and what is required for the script to start/stop. This is an example: ### BEGIN INIT INFO # Provides: my-service # Required-Start: $local_fs $network $remote_fs # Required-Stop: $local_fs $network $remote_fs # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: start and stop service my-service # Description: my-service blah blah ... ### END INIT INFO This is a commented section which is ignored by SysV. On the other hand, systemd reads that dependency information and runs those scripts depending on that. But there is one point, where systemd and SysV differ in terms of init scripts. SysV executes the scripts in sequential order based on their number in the filename. Systemd doesnt. If dependencies are met, systemd runs the scripts immediately, without honoring the numbering of the script names. 
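The ffmpeg -map recipe above is easy to script; a sketch that keeps video stream 0:0 and the second audio stream 0:2, copying codecs exactly as that answer does:

```python
import subprocess

subprocess.run([
    "ffmpeg", "-i", "in.mp4",
    "-map", "0:0", "-map", "0:2",          # keep the video stream and the second audio stream
    "-vcodec", "copy", "-acodec", "copy",  # no re-encoding, same flags as the answer
    "out.mp4",
], check=True)
```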
Some of them will most probably fail because of the ordering. There are a lots of other incompatibilities that should be considered. If there are init scripts and .service files for the same service, systemd will execute both, as soon as the dependencies are met (in case of the init script, those defined in the LSB header)."} +{"doc_id": 98966, "author": "Sergii Ivashchenko", "text": "Just hold the Option key while taking a window screen shot."} +{"doc_id": 328349, "author": "ness-EE", "text": "Create (or edit if it exists) the following ~/.ssh/config file: Host * UseKeychain yes AddKeysToAgent yes IdentityFile ~/.ssh/id_rsa"} +{"doc_id": 131744, "author": "Bryan Denny", "text": "Another possible cause: is your phones memory full? If your phone is low on free memory, it will not sync any new data to it. Youll have to uninstall some apps or move them to the SD card to resolve this."} +{"doc_id": 131749, "author": "Stefano", "text": "There are actually several alternatives to the official Google Android market (aka Play Store). I personally like to search for new apps from my PC browser, and a bit less from my phone. Most (all?) of the markets nowadays offer both, via a dedicated phone-app that youll have to install, except from the official one which is already there. Until, well, yesterday, the official market would not have been available from your PC (meaning: on a PC web browser). Now it is, with a neat push-to-your-phone one click installation that is taking away one of the advantages of the competition (see AppBrain). The alternative markets, though, have still their peculiarities. Ill make a short summary but dont expect it to highlight all of the differences. Some markets (e.g. AndSpot) do not offer very specific features to users, but try to gather developers by offering advanced features such as easy stats. Amazon AppStore: latest big entry. countries limitations; catalog looking very promising with some (exclusive?) good pay-for apps; also features special offers and daily freebies. Def worth checking out AppBrain: born to let you install Android Market applications directly from your PC web browser, also allows you to discover new apps based on the ones you have. Its a meta-store (my favourite before the official market update) APKPure: dont need any registration to use it SlideME: they provide applications to niche markets, based on geographic location, payment method or even types of applications that users cant find in traditional channels Samsung Apps (must be installed on Samsung Galaxy devices, or else you cant use this): Samsung app store, which of course requires you to own a Samsung Android device F-Droid they distribute FOSS open source packages that pass audit and verification to ensure that code have no malicious parts. GetJar: my latest discovery. Quite a lot of apps, and they have specials (GOLD) commercial apps for free. Extremely interesting platform for developers, since they seem to offer above-the-average marketing and PS activities! TorrApk: its free and it distributes only free Android apps through their apk files. If you are a developer you can get your account and publish your apps. (NB. Not sure how legit the apps there are, though) appsfire: used to be yet another app for discovering official market applications (iOS/Android), now an app advertising agency Ill add that theres a market for Adult Apps too now; and a lot of review/forum sites will link to one or more of these markets, e.g., AndroidTapp or AndroidPit AndroLib. 
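The ~/.ssh/config stanza above can be applied from a script; a sketch with a deliberately naive already-present check (the check string is my choice, not part of the answer):

```python
from pathlib import Path

cfg = Path.home() / ".ssh" / "config"
stanza = "Host *\n  UseKeychain yes\n  AddKeysToAgent yes\n  IdentityFile ~/.ssh/id_rsa\n"
cfg.parent.mkdir(mode=0o700, exist_ok=True)
# Naive idempotence: skip the append if UseKeychain is already configured.
if not cfg.exists() or "UseKeychain" not in cfg.read_text():
    with cfg.open("a") as f:
        f.write("\n" + stanza)
```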
I suggest you click on some of these links and see for yourself if the look/applications suit your style! Thanks to various sources, in particular this thenextweb.com article, and a lot of other stackexchange editing! For neverending memory, the following are no longer available: AndSpot: focused around an app for sharing and discovering apps Handango by appia.com: a huge, cross-platform (Symbian/Android/Java/Winmobile...) market aproov: a quite different web look. Register to download via a specific phone app. MobiHand OnlyAndroid: more focused on (expensive) pay apps, but offers discounts and free deals"} +{"doc_id": 131750, "author": "jmbouffard", "text": "In Android, applications never need to be closed and the OS is taking care of cleaning the memory of any remaining applications when the resources are needed elsewhere. Using a task killer will just break the normal operation of the OS and could even slow down the device because the applications will always have to be reloaded from the start. Android application life cycle is working in a way that applications that are not on top are moved in a paused state that doesnt use any cpu anymore; only the memory content is kept in case the application is opened again; and the memory will be freed if needed. This being said, the application developer has the responsibility to follow the application life cycle when he creates his application so some bugs or mistakes can happen that will result in an application that never really stops or pause. If you encounter such an application and want to close it you can do it through the Settings -> applications -> Manage applications by selecting Force stop on the application. But I would really not recommend using a task killer that is constantly running on the device."} +{"doc_id": 197285, "author": "Matt", "text": "Update As mikemaccana notes, the systemd journal is now the standard logging device for most distros. To view the stdout and stderr of a systemd unit use the journalctl command. sudo journalctl -u [unit] Original Answer By default stdout and stderr of a systemd unit are sent to syslog. If youre using the full systemd, this will be accessible via journalctl. On Fedora, it should be /var/log/messages but syslog will put it where your rules say. Due to the date of the post, and assuming most people that are exposed to systemd are via fedora, you were probably hit by the bug described here: https://bugzilla.redhat.com/show_bug.cgi?id=754938 It has a good explanation of how it all works too =) (This was a bug in selinux-policy that caused error messages to not be logged, and was fixed in selinux-policy-3.10.0-58.fc16)"} +{"doc_id": 262831, "author": "osvein", "text": "POSIX Base Specifications, Issue 7 on /tmp: The following directory shall exist on conforming systems and shall be used as described: /tmp A directory made available for applications that need a place to create temporary files. Applications shall be allowed to create files in this directory, but shall not assume that such files are preserved between invocations of the application. The File Hierarchy Standard 2.3 on /tmp: The /tmp directory must be made available for programs that require temporary files. Programs must not assume that any files or directories in /tmp are preserved between invocations of the program. Rationale IEEE standard P1003.2 (POSIX, part 2) makes requirements that are similar to the above section. 
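To read a unit's stdout/stderr the way the journalctl answer above describes, the command can be driven from a script; "my-service" is a placeholder unit name, not one from these answers:

```python
import subprocess

logs = subprocess.run(
    ["journalctl", "-u", "my-service", "-n", "50", "--no-pager"],  # last 50 lines, no pager
    capture_output=True, text=True,
).stdout
print(logs)
```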
Although data stored in /tmp may be deleted in a site-specific manner, it is recommended that files and directories located in /tmp be deleted whenever the system is booted. FHS added this recommendation on the basis of historical precedent and common practice, but did not make it a requirement because system administration is not within the scope of this standard. POSIX does not specify /var/tmp. The FHS does though: The /var/tmp directory is made available for programs that require temporary files or directories that are preserved between system reboots. Therefore, data stored in /var/tmp is more persistent than data in /tmp. Files and directories located in /var/tmp must not be deleted when the system is booted. Although data stored in /var/tmp is typically deleted in a site-specific manner, it is recommended that deletions occur at a less frequent interval than /tmp."} +{"doc_id": 164538, "author": "MetaColon", "text": "If your phone is rooted or you have a custom recovery (like TWRP), this is actually much easier. The only thing youll have to do is to add the following line to your build.prop file located in /system: ro.config.media_vol_steps=30 Where 30 represents the number of steps. This can be done with a root file explorer (like Root Browser) or via VI in the TWRP terminal or via adb."} +{"doc_id": 623291, "author": "dan", "text": "A DHCP server is answering with a DHCP OFFER to provide an IP address. It knows the target MAC and IP, hence will use a unicast IP packet, toward the originating Ethernet address, hence a unicast Ethernet frame too. If you want to see it for real, just enter the following command on your DHCP server: tcpdump -i my_ethernet_device -e src port bootps where my_ethernet_device is the name of the Ethernet interface on which your DHCP server is replying to your network."} +{"doc_id": 99002, "author": "Paul", "text": "Yes, you can use multiple charging cases. Put the AirPods in either charging case to see the AirPod charge level and the battery level of that particular charging case. At the moment, one of my cases shows 94%. Moving the AirPods to the other case shows a 100% case charge level."} +{"doc_id": 262842, "author": "JdeBP", "text": "chaos answer is what some documentation says. But its not what systemd actually does. (Its not what van Smoorenburg rc did, either. The van Smoorenburg rc most definitely did not ignore LSB headers, which insserv used to calculate static orderings, for starters.) The Freedesktop documentation, such as that Incompatibilities page, is in fact wrong, on these and other points. (The HOME environment variable in fact is often set, for example. This went wholly undocumented anywhere for a long time. Its now documented in the manual, at least, but that Freedesktop WWW page still hasnt been corrected.) The native service format for systemd is the service unit. systemds service management proper operates solely in terms of those, which it reads from one of nine directories where (system-wide) .service files can live. /etc/systemd/system, /run/systemd/system, /usr/local/lib/systemd/system, and /usr/lib/systemd/system are four of those directories. The compatibility with van Smoorenburg rc scripts is achieved with a conversion program, named systemd-sysv-generator. This program is listed in the /usr/lib/systemd/system-generators/ directory and is thus run automatically by systemd early in the bootstrap process at every boot, and again every time that systemd is instructed to re-load its configuration later on. 
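The POSIX/FHS quotes above boil down to one rule: treat /tmp as scratch space that may vanish at the next boot, and /var/tmp as the more persistent variant. A small stdlib illustration of the /tmp side:

```python
import tempfile

# tempfile defaults to the platform temp dir (typically /tmp); never treat the
# resulting path as durable state across reboots.
with tempfile.NamedTemporaryFile(mode="w", suffix=".scratch", delete=False) as f:
    f.write("ephemeral data\n")
    print("wrote", f.name)
```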
This program is a generator, a type of ancillary utility whose job is to create service unit files on the fly, in a tmpfs where three more of those nine directories (which are intended to be used only by generators) are located. systemd-sysv-generator generates the service units that run the van Smoorenburg rc scripts from /etc/init.d, if it doesnt find a native systemd service unit by that name already existing in the other six locations. systemd service management only knows about service units. These automatically (re-)generated service units are written to invoke the van Smoorenburg rc scripts. They have, amongst other things: [Unit] SourcePath=/etc/init.d/wibble [Service] ExecStart=/etc/init.d/wibble start ExecStop=/etc/init.d/wibble stop Received wisdom is that the van Smoorenburg rc scripts must have an LSB header, and are run in parallel without honouring the priorities imposed by the /etc/rc?.d/ system. This is incorrect on all points. In fact, they dont need to have an LSB header, and if they do not systemd-sysv-generator can recognize the more limited old RedHat comment headers (description:, pidfile:, and so forth). Moreover, in the absence of an LSB header it will fall back to the contents of the /etc/rc?.d symbolic link farms, reading the priorities encoded into the link names and constructing a before/after ordering from them, serializing the services. Not only are LSB headers not a requirement, and not only do they themselves encode before/after orderings that serialize things to an extent, the fallback behaviour in their complete absence is actually significantly non-parallelized operation. The reason that /etc/rc3.d didnt appear to matter is that you probably had that script enabled via another /etc/rc?.d/ directory. systemd-sysv-generator translates being listed in any of /etc/rc2.d/, /etc/rc3.d/, and /etc/rc4.d/ into a native Wanted-By relationship to systemds multi-user.target. Run levels are obsolete in the systemd world, and you can forget about them. Further reading systemd-sysv-generator. systemd manual pages. Freedesktop.org. Environment variables in spawned processes. systemd.exec. systemd manual pages. Freedesktop.org. https://unix.stackexchange.com/a/394191/5132 https://unix.stackexchange.com/a/204075/5132 https://unix.stackexchange.com/a/196014/5132 https://unix.stackexchange.com/a/332797/5132"} +{"doc_id": 557766, "author": "Jason Parms", "text": "SSL certificate is used for two purposes. One is securing online transactions and private information which is transmitted between a web browser and a web server. Second is Trust, SSL is used for increase customer confidence. SSL proves secure session of your website, it means your customer trust on your website. Each certificate has own validation process and following this process certificate authority validates your business reliability and send a certificate for your website. A basic cheap SSL certificate only validates your domain authority and authenticated using the approver email verification system. Approver can easily get this certificate in just minutes with a generic email address. OV and EV SSL certificates plugged with customer\u2019s trust and through its strict authentication process, it gives the highest level of trust. EV SSL validates manifold components of identifying your domain and business information. 
It follows a manual verification process, and if during this process the system fails to verify your business or suspects it of potentially fraudulent activity, your order may be lined up for manual review. The main difference is in trust factor and brand reputation: when your customers see the green address bar in the browser, they feel more secure and are encouraged to make transactions. Beyond that, there are some differences in other features like encryption, browser compatibility, key length, mobile support, etc. The certificate warranty also explains the differences. Certificate authorities provide extended warranty ($1K to $1.75M) against mis-issuance of an SSL certificate which explains the worth of your investment for website security. While we focus on the price of a certificate, it doesnt matter where to buy your certificate \u2013 certificate authority or an authorized reseller. An authorized reseller offers the same SSL products and security features, with better support at a reasonable price. Jason Parm is affiliated with SSL2BUY (Global SSL Reseller)"} +{"doc_id": 426700, "author": "Tom Squires", "text": "They are mostly used for code reusability. If you code to the interface you can use a different class that inherits from that interface and not break everything. Also they are very useful in webservices where you want to let the client know what a class does (so they can consume it) but dont want to give them the actual code."} +{"doc_id": 426704, "author": "Falcon", "text": "Interfaces are the backbone of (static) polymorphism! The interface is what matters. Inheritance would not work without interfaces, as subclasses basically inherit the already implemented interface of the parent. How often you make uses of them and what makes you do so ?? Pretty often. Everything that needs to be pluggable is an interface in my applications. Often times you have otherwise unrelated classes that need to provide the same behaviour. You cant solve such problems with inheritance. Need different algorithms to perform operations on the same data? Use an interface (see strategy pattern)! Do you want to use different list implementations? Code against an interface and the caller does not need to worry about the implementation! Its been considered a good practice (not only in OOP) to code against interfaces for ages, for one single reason: Its easy to change an implementation when you realize it does not fit your needs. Its pretty cumbersome if you try to achieve that only with multiple inheritance or it boils down to creating empty classes in order to provide the necessary interface."} +{"doc_id": 590547, "author": "Klaws", "text": "With deep packet inspection, the ISP can detect most VPN protocols (not the data encrypted in the VPN packets, just that there is VPN traffic) and block it. Some companies do this to ensure that they can decrypt all traffic (with the MITM attack and forged certificates to have DPI on SSL as well). The idea is to force you to use insecure communication channels by preventing everything else. Note that these insecure channels might be more secure, from the companys point of view, as they can do Data Leakage Prevention there. In such a case, non-standard VPN techniques, like HTTP tunneling might be an option. Note that the terms of use might disallow measures to circumvent DPI. Edit: some ISPs use DPI for traffic shaping. They dont log all the transmitted data, they just check (for example) for BitTorrent traffic and assign it a lower priority or limited bandwidth. 
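The interface answers above all circle the same point: depend on the contract, not the concrete class, and substitution (including test fakes) comes for free. A minimal Python sketch of that decoupling, with names of my own invention:

```python
from abc import ABC, abstractmethod

class DataAccess(ABC):                     # the interface callers depend on
    @abstractmethod
    def fetch(self, key: str) -> str: ...

class InMemoryDataAccess(DataAccess):      # a test fake standing in for a real DB client
    def __init__(self) -> None:
        self._store = {"greeting": "hello"}

    def fetch(self, key: str) -> str:
        return self._store[key]

def report(dao: DataAccess, key: str) -> str:
    return f"value={dao.fetch(key)}"       # works with any DataAccess implementation

print(report(InMemoryDataAccess(), "greeting"))
```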
Now, they are not stealing your password, just the bandwidth you are paying for...."} +{"doc_id": 426708, "author": "Jesse C. Slicer", "text": "Use of interfaces helps a system stay decoupled and thus easier to refactor, change, and redeploy. It is a very core concept to object-oriented orthodoxy and I first learned about it when C++ gurus made pure abstract classes which are quite equivalent to interfaces."} +{"doc_id": 99028, "author": "Sunil Gill", "text": "There are many ways to find mac task manager. Easiest one is to use spotlight. Just type Activity monitor i.e mac task manager. If you wish to just force quit an app or process. You can just use this shortcut: Alt + Command (\u2318) + Esc Source: Mac Activity Monitor"} +{"doc_id": 426711, "author": "Sheldon Warkentin", "text": "Interfaces are the best way to maintain well decoupled constructs. When writing tests, you will find that concrete classes will not work in your test environment. Example: You want to test a class that depends on a Data Access Service class. If that class is talking to a web service, or database -- your unit test will not run in your test environment ( plus it has turned into an integration test ). Solution? Use an Interface for your Data Access Service and Mock that interface so you can test your class as a unit. On the other hand, WPF & Silverlight do not play at all with Interfaces when it comes to binding. This is a pretty nasty wrinkle."} +{"doc_id": 66266, "author": "Erik Berkun-Drevnig", "text": "In Mavericks there is an option to open a new terminal window at the folder you have highlighted in Finder. To enable this option go to Finder -> Services -> Service Preferences and check New Terminal at Folder. Then use it from the Finder menu:"} +{"doc_id": 99051, "author": "Gilad Shahrabani", "text": "A bit of an old question but in iTerm2 this helped me: iTerm2 \u2192 Preferences \u2192 keys \u2192 Add key mapping: command + w => ignore"} +{"doc_id": 590576, "author": "Xiong Chiamiov", "text": "Restricting your available pool of numbers reduces the number of possible solutions, making it less secure. Repeating digits is a common human weakness when choosing pin codes, which means it will be tried first by attackers. Thus, ruling out repeated numbers increases security. As is often the case, the decision has both upsides and downsides depending on the specific attack youre defending against. You probably shouldnt over-think it, and consider wider-perspective changes (like not using a 4-digit pin, or adding a second factor, or having lockouts on incorrect tries) if you want to increase the security of the system."} +{"doc_id": 754, "author": "raimue", "text": "iStat Menus has the functionality you are asking for. It is available starting at USD$14.39 for a single license or $17.99 for a family pack (up to five different Macs)."} +{"doc_id": 755, "author": "Michael H.", "text": "I use MenuMeters for this functionality, and have a hard time living without it. How do other people know when their web browser is finally done downloading a page, or YouTube stalled out, or iPhoto still working, or ...? MenuMeters is freeware, but well worth the donation. 
The original author has stopped maintaining MenuMeters, but someone new has taken over for El Capitan."} +{"doc_id": 262899, "author": "leden", "text": "Here are some additional points about aliases and functions: Same-named alias and function can co-exist alias namespace is looked up first (see first example) aliases cannot be (un)set in subshells or non-interactive environment (see second example) For example: alias f=echo Alias; f # prints Alias function f { echo Function; }; f # prints Alias unalias f; f # prints Function As we can see, there are separate namespaces for aliases and functions; more details can be found using declare -A -p BASH_ALIASES and declare -f f, which prints their definitions (both are stored in memory). Example showing limitations of aliases: alias a=echo Alias a # OK: prints Alias eval a; # OK: prints Alias ( alias a=Nested; a ); # prints Alias (not Nested) ( unalias a; a ); # prints Alias bash -c alias aa=Another Alias; aa # ERROR: bash: aa: command not found As we can see aliases are not nestable, unlike functions. Also, their usage is limited to the interactive sessions. Finally, note that you can have arbitrary computation in an alias by declaring a function a immediately calling it, like so: alias a_complex_thing=f() { do_stuff_in_function; } f which is already in widespread use in case of Git aliases. The benefit of doing so over declaring a function is that your alias cannot be simply overwritten by source-ing (or using .) a script which happens to declare a same-named function."} +{"doc_id": 66294, "author": "beporter", "text": "Also available are iosnoop and iotop depending on your specific needs. These terminal commands can be piped through grep to watch for filesystem events from a specific process or against a specific file."} +{"doc_id": 131832, "author": null, "text": "my understanding is that different mod/rom is similar to likes of ubuntu and fedora (different variant of linux) in PC world. Kernel is lower level than this, like both ubuntu 10.10 and fedora 14 are using the same linux kernel 2.6.35"} +{"doc_id": 590586, "author": "Dmitry Grigoryev", "text": "This really depends on how the PIN is created: If the PIN is generated, make sure the distribution is uniform and dont exclude any combinations. That will maximize the entropy. If the PIN is chosen by a human operator, it makes perfect sense to exclude some combinations. I wouldnt go as far as rejecting half of the combinations, but if you do, you should also consider reject PINs starting with 0 1 and 2 (think birth years and dates) then PINs corresponding to physical key layouts like 2580 and 1379 and so on and so forth. Just make sure you stop before you end up allowing a single 8068 PIN which this study has found to be the least probable. What you should do for human-chosen PINs is excluding the most common combinations: 1234 and 1111 alone account for almost 17% of all PINs in use, and 20 most popular PINs account for almost 27%. Those include each digit repeated 4 times and popular combinations like 1212 and 4321. Edit: on a second thought, I think you should exclude most common combinations in any case. Even if your PIN is randomly generated, the attacker may not know that, in which case they will most probably try those combinations first."} +{"doc_id": 33545, "author": "user199576", "text": "You could also take a look as ZOC6 seems pretty cool. ZOC6 product page"} +{"doc_id": 590603, "author": "J.A.K.", "text": "It depends on the implementation. 
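The keyspace arithmetic running through these PIN answers is quick to verify; a small sketch showing that banning the ten repeated-digit PINs costs almost nothing in entropy:

```python
import math

total = 10 ** 4                      # all 4-digit PINs: 0000..9999
repeated = 10                        # 0000, 1111, ..., 9999
remaining = total - repeated

print(f"keyspace removed: {repeated / total:.1%}")             # 0.1%
print(f"entropy: {math.log2(total):.4f} -> {math.log2(remaining):.4f} bits")
# ~13.2877 -> ~13.2863 bits: a negligible loss, as the answers note
```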
Eliminating repeated digits will reduce the keyspace by 0.1% but has some benefits to physical security that may make it worth the tradeoff. A lot of good, clever answers here, the main point being that instead of making it more secure, you're making the keyspace smaller (however negligibly, 10 out of 10,000). The top answers however fail to touch on the physical aspect of entering a PIN. Visual and thermovisual extraction are a real danger these days. In other words, bad guys shoulder-surfing your PIN code either with their eyes, a telescope, a skimming camera on the ATM or even thermal imaging cameras. That last one is more recent, and especially nasty, as a skimmer can walk up to a PIN pad and look at the heat signature, even if you covered the pad well. Having a repeated-digit PIN will hurt security in this area; it reduces the complexity of the physical location of the numbers by a horrible amount. Even if you covered your hand, chances are the attacker will guess the button you pressed four times before a lockout happens. On a phone, if there is a big grease spot on the zero, that's the one I'll try first."} +{"doc_id": 164626, "author": "bers", "text": "I am amazed no one has mentioned the Google solution, Datally. No root required. More information on the Google blog: https://blog.google/technology/next-billion-users/meet-datally-new-way-understand-control-and-save-mobile-data/ Or download immediately from the Play Store: https://play.google.com/store/apps/details?id=com.google.android.apps.freighter"} +{"doc_id": 197401, "author": "sarath", "text": "Whenever you delete a file using the rm command, the file's data is never deleted. In other words, the blocks in the file system containing the data are still there. What happens when you run the rm command is that the system marks the inode belonging to that file as unused, and the data blocks of that file as unused too (but not wiped out). However, ext3 zeros most of the fields in the inode when a file is deleted. This simple marking as unused is done for speed... otherwise deletion would take some more time. That's why you might have noticed that deleting even large files is faster (you can recover the data if those data blocks are not overwritten). More info: Inode Structure, How file deletion works"} +{"doc_id": 590618, "author": "Ludwig Behm", "text": "Let's add a real example of why this can be a good security improvement. Let's say your coworker or whoever found out your webmail password (say Gmail 7 years ago, without 2-factor). The attacker gets access to the web interface to change your password (imagine some reasons) and via POP3 to your mails. Because Google is a huge network, it takes some time until old passwords are disabled for POP3 access. This gives the attacker the possibility to reset your password again and again. Even if you regain access with the reset function and validate yourself with your mailbox access on your smartphone or a reset strategy via SMS to your smartphone, the attacker (who still has access to your mailbox via POP3 with the old or his own passwords) can reset your password. With such an attack the attacker can't lock the victim out forever, because he can't remove a reset strategy like an SMS number - but it surely poses a very high risk. This attack vector is easily preventable if password changes are possible only every 24 hours."} +{"doc_id": 230175, "author": "Florian Bidab\u00e9", "text": "The filtered port statement from nmap differs according to your scan method.
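The open / closed / filtered taxonomy described next can be observed even without raw sockets. A hedged sketch using a plain TCP connect probe rather than nmap's half-open -sS scan (scanme.nmap.org is nmap's public test host; results depend on your network):

```python
import socket

def probe(host: str, port: int, timeout: float = 2.0) -> str:
    """Rough connect-scan analogue of nmap's port states."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.settimeout(timeout)
        try:
            s.connect((host, port))
            return "open"              # handshake completed (SYN/ACK seen)
        except ConnectionRefusedError:
            return "closed"            # peer answered with RST
        except OSError:
            return "filtered"          # timeout or ICMP error: probe likely dropped

for port in (22, 80, 31337):
    print(port, probe("scanme.nmap.org", port))
```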
The standard scan (TCP scan if an unprivileged user, or half-open scan -sS if superuser) relies on the TCP protocol (the 3-way handshake). A client (you) issues a SYN; if the server replies SYN/ACK, it means that the port is open! You issue a SYN; if the server replies RST, it means that the port is closed! You issue a SYN; if the server does not reply, or replies with an ICMP error, it means that the port is filtered (likely an IDS / stateful firewall blocks your request). To figure out the real status of the port, you can: use the -sV or -A option (version detection; it will help you determine the status of this port); use --tcp-flags SYN,FIN to try bypassing the firewall; use other scan types (http://nmap.org/book/man-port-scanning-techniques.html). The excellent Nmap Network Discovery book, written by its creator Fyodor, explains this very well. I quote: filtered: Nmap cannot determine whether the port is open because packet filtering prevents its probes from reaching the port. The filtering could be from a dedicated firewall device, router rules, or host-based firewall software. These ports frustrate attackers because they provide so little information. Sometimes they respond with ICMP error messages such as type 3 code 13 (destination unreachable: communication administratively prohibited), but filters that simply drop probes without responding are far more common. This forces Nmap to retry several times just in case the probe was dropped due to network congestion rather than filtering. This sort of filtering slows scans down dramatically. open|filtered: Nmap places ports in this state when it is unable to determine whether a port is open or filtered. This occurs for scan types in which open ports give no response. The lack of response could also mean that a packet filter dropped the probe or any response it elicited. So Nmap does not know for sure whether the port is open or being filtered. The UDP, IP protocol, FIN, NULL, and Xmas scans classify ports this way. closed|filtered: This state is used when Nmap is unable to determine whether a port is closed or filtered. It is only used for the IP ID idle scan discussed in Section 5.10, TCP Idle Scan (-sI)"} +{"doc_id": 131874, "author": "Matt H", "text": "The about:debug menu on the HTC Desire HD has an option under Menu > More > Settings called Enable GEP Zoom. This turns off HTC's text reflow feature and uses the default Google zoom."} +{"doc_id": 623395, "author": "Robert", "text": "According to the OSPF RFC 2328 (10.6): If the Interface MTU field in the Database Description packet indicates an IP datagram size that is larger than the router can accept on the receiving interface without fragmentation, the Database Description packet is rejected. The simple answer is that the standard was designed to just drop datagrams which are too large instead of fragmenting them. Fragmented traffic increases the CPU burden of a device and decreases performance because of the extra traffic and processing that fragmentation requires. Considering that the goal of a dynamic routing protocol is to be a stable and fast-converging protocol, anything contrary to those goals should be eliminated. Setting the requirement for the MTUs to match helps enforce this performance requirement. More from the OSPF RFC: 4.3. Routing protocol packets The OSPF protocol runs directly over IP, using IP protocol 89. OSPF does not provide any explicit fragmentation/reassembly support. When fragmentation is necessary, IP fragmentation/reassembly is used.
OSPF protocol packets have been designed so that large protocol packets can generally be split into several smaller protocol packets. This practice is recommended; IP fragmentation should be avoided whenever possible."} +{"doc_id": 131877, "author": "Lie Ryan", "text": "Rooted phones are just as secure as an unrooted phones if you never grant root permission to any apps. The problem is that if you root your phone, youre bound to give root permission (otherwise, why are you rooting your phone in the first place), and applications that you give root permission may turned out to be rogue or leak their permission to allow an untrusted applications to gain root-like permission. Running rooted phone is safe as long as you know which app to give root access and which are not. Problem is, even assuming that you only pick trustworthy apps they still can leak permissions inadvertantly (in security parlance, this is called confused deputy problem), so you must really be careful when choosing trusted apps."} +{"doc_id": 361254, "author": "DustWolf", "text": "Here, a copypaste you can immediately use: cat list.txt | xargs -I{} command parameter {} parameter The item from the list will be put where the {} is and the rest of the command and parameters will be used as-is."} +{"doc_id": 99113, "author": "poige", "text": "From my own pain^W experience I can tell that this service is needed at least for text message forwarding (relaying) to work. Having it blocked with Firewall, for e. g., puts a big bold ban on Text Message Forwarding item in iPhones settings. In fact it wont be even shown at all there"} +{"doc_id": 557867, "author": "Babken Vardanyan", "text": "To add to what has already been said, I can think of 2 more reasons why changing passwords regularly is helpful: 1) When computational power raises substantially Say, in early 90s 7-char passwords were considered secure because computers werent nearly powerful enough to bruteforce them. 24 years later systems which still have the same password can be successfully bruteforced. Some calculations, considering the password consists of 24 letters (uppercase and lowercase), 10 numbers and 10 symbols, and Moores law (x2 more power every 2 years): possible combinations = (24 + 24 + 10 + 10) ^ 7 = 6,722,988,818,432 tries per second in 1990 = 100,000 (for example) time required in 1990 = possible combinations / tries per second = 2 years tries per second in 2014 = tries per second in 1990 * (2 ^ 12) = 409,600,000 time required in 2014 = possible combinations / tries per second = 4 hours This is more about increasing minimum required password length, but it should be done regularly and short passwords should be changed. 2) Compliance with ISO/IEC 27001/27002 standards From ISO/IEC 27002:2013, section 9.4.3: A password management system should: ... e) enforce regular password changes and as needed; 3) Compliance with PCI DSS standard From PCI DSS v3, section 8.2.4: 8.2.4 Change user passwords/passphrases at least every 90 days. 8.2.4.a For a sample of system components, inspect system configuration settings to verify that user password parameters are set to require users to change passwords at least every 90 days. 8.2.4.b Additional testing procedure for service providers: Review internal processes and customer/user documentation to verify that: Non-consumer user passwords are required to change periodically; and Non-consumer users are given guidance as to when, and under what circumstances, passwords must change. 
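Stepping back to the brute-force arithmetic in point 1 above: it is easy to reproduce. A sketch that keeps the answer's own figure of 24 letters per case (sic; the standard alphabet has 26):

```python
alphabet = 24 + 24 + 10 + 10          # letters (sic), digits, symbols -- 68 total
combinations = alphabet ** 7          # 6,722,988,818,432 seven-char passwords

tries_1990 = 100_000                  # guesses per second, the answer's example
tries_2014 = tries_1990 * 2 ** 12     # Moore's law: doubling every 2 years, x4096

print(f"{combinations:,} combinations")
print(f"1990: {combinations / tries_1990 / 86_400 / 365:.1f} years")   # ~2.1
print(f"2014: {combinations / tries_2014 / 3_600:.1f} hours")          # ~4.6
```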
Passwords/phrases that are valid for a long time without a change provide malicious individuals with more time to work on breaking the password/phrase."} +{"doc_id": 33582, "author": "minnow", "text": "Another option is to enable the Use PC Style Home/End setting in Karabiner (formerly KeyRemap4MacBook): The setting is defined in Resources/include/checkbox/for_pc_users.xml. You can also save a file like this as ~/Library/Application Support/KeyRemap4MacBook/private.xml: HOMEENDIGNORE com.microsoft.Word com.microsoft.Powerpoint com.microsoft.Excel com.vmware.fusion com.vmware.proxyApp. homeend homeend HOMEENDIGNORE __KeyToKey__ KeyCode::HOME, ModifierFlag::NONE, KeyCode::A, VK_CONTROL __KeyToKey__ KeyCode::END, ModifierFlag::NONE, KeyCode::E, VK_CONTROL __KeyToKey__ KeyCode::HOME, VK_SHIFT | ModifierFlag::NONE, KeyCode::A, VK_CONTROL | VK_SHIFT __KeyToKey__ KeyCode::END, VK_SHIFT | ModifierFlag::NONE, KeyCode::E, VK_CONTROL | VK_SHIFT Then open the KeyRemap4MacBook application, press the ReloadXML button, and enable the setting. See https://pqrs.org/macosx/keyremap4macbook/xml.html.en for more information."} +{"doc_id": 816, "author": "mmmmmm", "text": "It is usually better to keep permissions as strict as possible. Keeping /usr/local owned by root means that only processes that run as root/sudo (or ask for admin user via the Apple authorization dialog box) can write to this area. Thus, a process download has to ask you for a password before corrupting files there. But as you say, it makes adding new programs harder. I am OK with running sudo, as you install things less often than running them but you have to trust that the build process does not change anything it should. If you want to avoid sudo I would install Homebrew into ~/usr/local and alter your path, manpath etc to include the directories under there. A better way is to create another user\u2014say, homebrew and create a directory owned by that user. Then, install there using sudo -U homebrew. Other users will have the benefit of not being able to overwrite any other files, because they are not running as root and other programs cannot affect homebrew. (I note that the Homebrew FAQ does suggest this new user if you are in a multi user environment. I would say that any Unix machine including macOS is a multi user environment) However as the Homebrew wiki says the recipes dont find all cases of /usr/local and replace them with the chosen directory I suspect we are stuck with /usr/local."} +{"doc_id": 459579, "author": "Scott Whitlock", "text": "Yes, splitting long functions is normal. This is a way of doing things thats encouraged by Robert C. Martin in his book Clean Code. Particularly, you should be choosing very descriptive names for your functions, as a form of self-documenting code."} +{"doc_id": 426816, "author": "keppla", "text": "For me, it was the other way around because i liked programming, i was good at math, because for most concepts, i had a practical application, starting simple with logic (x and y or z), to simpler analysis (ok, f(x) is a function. i know that, that even looks exactly like the functions in Pascal. go on), to higher stuff (a mapping |R -> |R? ah, thats like a Dictionary with floats as keys and values). 
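The analogies in that last answer can be taken literally; a toy sketch, purely illustrative:

```python
# Logic, "x and y or z"
x, y, z = True, False, True
print(x and y or z)           # True

# A function f: R -> R, just like in Pascal
def f(x: float) -> float:
    return 2 * x + 1

# A (partial) mapping R -> R as a dictionary with floats as keys and values
g = {0.0: 1.0, 1.0: 3.0, 2.0: 5.0}
print(f(2.0), g[2.0])         # 5.0 5.0
```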
I was often surprised, that math was not just not that hard, but instead provided me a much easier way of thinking about my programming problems than those clumsy, pragmatic languages i was coding in ever could."} +{"doc_id": 164679, "author": "Firelord", "text": "As of now, many apps can show the package name of the installed app, such as Elixir 2 (see Applications option), My App List, Applications Info and many more in Play Store. Screenshots of the ones I use:"} +{"doc_id": 557900, "author": "Kaz", "text": "Whether or not the hash is salted only makes a difference if the attacker has the password hash. Without a salt, the hash can be attacked with a rainbow table: a pre-computed dictionary which associates passwords with hashes. An N-bit salt increases the storage requirement for a rainbow table, and the time to compute that table, by a factor of 2**N. So for instance, with a 32 bit salt, just a single rainbow table dictionary entry, say for the password passw0rd, requires many gigabytes of storage, making such a rainbow table very expensive using current storage hardware. So when a salt is present, the attacker is reduced to a brute force attack on the specific set of password hashes that have been obtained. However: for weak passwords, a brute force attack will succeed in relatively little time. sufficiently strong passwords will not be found in a rainbow table. if the attacker has access to the hashes, system security has been compromised already: modern systems and protocols do not reveal their password hashes. If the attacker is not able to gain access to the password database, the passwords may as well be stored in plaintext. if an attacker must compromise the system to get to the hashes in order to reverse passwords from them, then the only passwords which have value to the attacker are those which are re-used for other security domains to which the attacker has no access yet. Passwords which are not reused have no value (and certainly less value than other sensitive information associated with the accounts). Say user joewestlake uses the password god1234. The attacker reverses this instantly using a rainbow table. (Or within mere minutes of cracking using a brute-force attack, if the hash is salted, since the password is so bad.) Now the problem is that joewestlake also used god1234 for his Gmail account, and for online banking, oops! Now the attacker reads Joes e-mails, and learns enough about Joe so that he can easily answer the question what was the name of your first pet when logging in to Joes online banking. So, the rationale for salts is that they somewhat protect users by making medium-security passwords harder to reverse: passwords which, without a salt, could reasonably be expected to be found in a rainbow table, but which are strong enough that individually brute-forcing them takes a very long time. But salts provide this benefit only in the event that the hashes are compromised, which is already a serious security breach, and the benefit is only for those users who re-use their medium-security passwords in other systems. Say Joe instead used a password made of 10 random alphanumeric characters and symbols. This could still be in a rainbow table, but takes a lot of work crack. So even if Joe used the same password for Gmail and online banking, he is safe, thanks to the salt. The cracker runs his brute-force crack for perhaps several hours, maybe days. The crack yields numerous weak passwords from others users of the same system who have weak passwords. 
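The salting scheme Kaz describes looks roughly like this in code. A minimal sketch; PBKDF2 is my choice of slow hash here, the answer itself does not prescribe one:

```python
import hashlib, hmac, os

ITERATIONS = 600_000                   # deliberately slow to resist brute force

def hash_password(password: str) -> tuple[bytes, bytes]:
    salt = os.urandom(16)              # 128-bit salt: a rainbow table would need
                                       # 2**128 variants per dictionary entry
    digest = hashlib.pbkdf2_hmac("sha256", password.encode(), salt, ITERATIONS)
    return salt, digest                # store both; the salt is not secret

def verify(password: str, salt: bytes, digest: bytes) -> bool:
    candidate = hashlib.pbkdf2_hmac("sha256", password.encode(), salt, ITERATIONS)
    return hmac.compare_digest(candidate, digest)

salt, digest = hash_password("god1234")
print(verify("god1234", salt, digest))    # True
print(verify("passw0rd", salt, digest))   # False
```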
The attacker is satisfied with that yield and stops cracking; Joes password is never reversed. Furthermore, if the breach is detected and users (including Joe) are advised to change their passwords, then Joe has a chance to outrun the attackers cracking attempts, even if the attacker persists in attacking the entire medium-security password space that includes Joes password. Joe knows that, the password he used on the compromised system is exactly the same as his Gmail and banking password, so he scrambles to change those other two. The salt helps here because it buys time for password-reusing users to change their password. The salt wont help those with very weak passwords which are cracked in minutes, but users of passwords which take hours or days to crack have a fighting chance."} +{"doc_id": 492370, "author": "Ixrec", "text": "Because as you say, protected still leaves you with the ability to modify the implementation completely. It doesnt genuinely protect anything inside the class. Why do we care about genuinely protecting the stuff inside the class? Because otherwise it would be impossible to change implementation details without breaking client code. Put another way, people who write subclasses are also the outer world for the person who wrote the original base class. In practice, protected members are essentially a class public API for subclasses and need to remain stable and backwards compatible just as much as public members do. If we did not have the ability to create true private members, then nothing in an implementation would ever be safe to change, because you wouldnt be able to rule out the possibility that (non-malicious) client code has somehow managed to depend on it. Incidentally, while In OOP, sooner or later, you are going to make a subclass of a class is technically true, your argument seems to be making the much stronger assumption that sooner or later, you are going to make a subclass of every class which is almost certainly not the case."} +{"doc_id": 492371, "author": "Sebastian Redl", "text": "In OOP, sooner or later, you are going to make a subclass of a class This is wrong. Not every class is meant to be subclassed and some statically typed OOP languages even have features to prevent it, e.g., final (Java and C++) or sealed (C#). it is good to understand and being able to modify the implementation completely. No, its not. Its good for a class to be able to clearly define its public interface and preserve its invariants even if it is inherited from. In general, access control is about compartmentalization. You want an individual part of the code to be understood without having to understand in detail how it interacts with the rest of the code. Private access allows that. If everything is at least protected, you have to understand what every subclass does in order to understand how the base class works. Or to put it in the terms of Scott Meyers: private parts of a class are affected by a finite amount of code: the code of the class itself. Public parts are potentially affected by every bit of code in existence, and every bit of code yet to be written, which is an infinite amount of code. Protected parts are potentially affected by every existing subclass, and every subclass yet to be written, which is also an infinite amount of code. The conclusion is that protected gives you very little more than public, whereas private gives you a real improvement. 
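Meyers' point about private versus protected maps to code directly. Python only emulates private members via name mangling, so treat this as an illustration of the principle rather than enforced access control:

```python
class Account:
    def __init__(self) -> None:
        self.__balance = 0          # "private": name-mangled to _Account__balance

    @property
    def balance(self) -> int:       # the stable public interface
        return self.__balance

    def deposit(self, amount: int) -> None:
        if amount <= 0:
            raise ValueError("deposit must be positive")  # invariant preserved
        self.__balance += amount

class SavingsAccount(Account):
    def sneak(self) -> None:
        # self.__balance would raise AttributeError here: the subclass cannot
        # silently depend on the parent's implementation detail.
        ...

acct = SavingsAccount()
acct.deposit(10)
print(acct.balance)  # 10
```

Only `Account` itself can touch `__balance`, so the finite-code-affects-private argument holds even under inheritance.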
It is the existence of the protected access specifier that is questionable, not private."} +{"doc_id": 492372, "author": "Robbie Dee", "text": "Yes, private fields are absolutely necessary. Just this week I needed to write a custom dictionary implementation where I controlled what was put into the dictionary. If the dictionary field were to be made protected or public, then the controls Id so carefully written could have been easily circumvented. Private fields are typically about providing safeguards that the data is as the original coder expected. Make everything protected/public and you ride a coach and horses through those procedures and validation."} +{"doc_id": 850, "author": "John P. Neumann", "text": "Its already been answered, but heres my 2 cents. Via Bash through the terminal find ~ -type f -name *pdf or find ~ -iname *pdf (or, if you want to ignore error messages, find ~ -type f -name *pdf 2>/dev/null) or you can use this to search for a string in a file: find ~ -iname *txt | xargs grep string you want This may not return anything on a pdf, but it will work on most other file types (text, php, py, html, etc)."} +{"doc_id": 492374, "author": "Ben Cottrell", "text": "private variables in a class are better than protected for the same reason that a break statement inside a switch block is better than a goto label statement; which is that human programmers are error-prone. protected variables lend themselves to un-intentional abuse (programmer mistakes), just as the goto statement lends itself to the creation of spaghetti code. Is it possible to write working bug-free code using protected class variables? Yes of course! Just as its possible to write working bug-free code using goto; but as the cliche goes Just because you can, doesnt mean you should! Classes, and indeed the OO paradigm, exist to guard against hapless error-prone human programmers making mistakes. The defense against human mistakes is only as good as the defensive measures built into the class. Making the implementation of your class protected is the equivalent of blowing an enormous hole in the walls of a fortress. Base classes have absolutely no knowledge of derived classes. As far as a base class is concerned, protected does not actually give you any more protection than public, because theres nothing stopping a derived class from creating a public getter/setter which behaves like a backdoor. If a base class permits un-hindered access to its internal implementation details, then it becomes impossible for the class itself to defend against mistakes. Base classes have absolutely no knowledge of their derived classes, and therefore have no way of guarding against mistakes made in those derived classes. The best thing a base class can do is hide as much of its implementation as possible as private and put enough restrictions in place to guard against breaking changes from derived classes or anything else outside of the class. Ultimately, high-level languages exist to minimise human errors. Good programming practises (such as SOLID principles) also exist to minimise human errors. Software developers who ignore good programming practices have a much higher chance of failure, and are more likely to produce broken unmaintainable solutions. Those who follow good practices have a much lower chance of failure, and are more likely to produce working maintainable solutions."} +{"doc_id": 197469, "author": "gertvdijk", "text": "Good answer is already posted by @StevenD, yet I think this might clarify it a bit more. 
The reason that the process is killed on termination of the terminal is that the process you start is a child process of the terminal. Once you close the terminal, this will kill these child processes as well. You can see the process tree with pstree, for example when running kate & in Konsole: init-+ \u251c\u2500konsole\u2500\u252c\u2500bash\u2500\u252c\u2500kate\u2500\u2500\u25002*[{kate}] \u2502 \u2502 \u2514\u2500pstree \u2502 \u2514\u25002*[{konsole}] To make the kate process detached from konsole when you terminate konsole, use nohup with the command, like this: nohup kate & After closing konsole, pstree will look like this: init-+ |-kate---2*[{kate}] and kate will survive. :) An alternative is using screen/tmux/byobu, which will keep the shell running, independent of the terminal."} +{"doc_id": 459618, "author": "Fred", "text": "Im sure this wont be the popular opinion, but its perfectly ok. Locality of Reference can be a huge aid in making sure you and others understand the function (in this case Im referring to the code and not to memory specifically). As with everything, its a balance. You should be more concerned with anyone who tells you always or never."} +{"doc_id": 875, "author": "Nippysaurus", "text": "While dragging a window, press key combo to move window to numbered space: Control + 1 Control + 2 Control + 3 etc ... EDIT: For Lion, I had to go to System Preferences -> Keyboard -> Keyboard Shortcuts -> Mission Control and enable these hotkeys."} +{"doc_id": 878, "author": "robsoft", "text": "SizeUp You could have a look at SizeUp, which has been recommended on apple.se at least once before - it has some nice keyboard tools for throwing windows around multiple monitors (as I use it) and also for throwing them around multiple spaces. Its not free, but its very useful and certainly worth the small registration fee."} +{"doc_id": 230256, "author": "cuonglm", "text": "Try: find / -xdev -type f -size +100M It lists all files that has size bigger than 100M. If you want to know about directory, you can try ncdu. If you arent running Linux, you may need to use -size +204800 or -size +104857600c, as the M suffix to mean megabytes isnt in POSIX. find / -xdev -type f -size +102400000c"} +{"doc_id": 394097, "author": "TRiG", "text": "Problem: Youre using both meanings of the word free at once, which is confusing. Free Software (capital letters) is often, but not always, an ideological position. It can also be practical. How can you collaborate on closed-source code? As for freeware (free as in beer), some people do it to advertise their skills or as a taster, to encourage people to buy a more full-featured program. I have a freeware video converter which prompts me to install browser toolbars every time I use it. I dont use it often enough for this to annoy me."} +{"doc_id": 230257, "author": "z atef", "text": "In addition to @Gnouc answer , you can also add ls -la to get more details. You should have sudo privileges to do that . $ find / -xdev -type f -size +100M -exec ls -la {} \\; | sort -nk 5 To only see files that are in the gigbyte, do: root# du -ahx / | grep -E \\d+G\\s+ 1.8G /.Spotlight-V100/Store-V2/A960D58E-A644-4497-B3C1-866A529BF919 1.8G /.Spotlight-V100/Store-V2"} +{"doc_id": 164732, "author": "phlummox", "text": "tl;dr: have the Android command line tools installed on a development machine, and USB debugging enabled on your device. 
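The find -size +100M recipes above have a straightforward Python analogue; a sketch (the /var/log root is just an example path):

```python
import os

def big_files(root: str, min_bytes: int = 100 * 2**20):
    """Yield (size, path) for files larger than min_bytes, a la find -size +100M."""
    for dirpath, _dirnames, filenames in os.walk(root):
        for name in filenames:
            path = os.path.join(dirpath, name)
            try:
                size = os.path.getsize(path)
            except OSError:          # broken symlink, permission denied, ...
                continue
            if size > min_bytes:
                yield size, path

for size, path in sorted(big_files("/var/log"), reverse=True):
    print(f"{size / 2**20:8.1f} MB  {path}")
```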
The device does not need to be rooted. Run adb forward tcp:9222 localabstract:chrome_devtools_remote, then wget -O tabs.json http://localhost:9222/json/list. Details: If you have the Android command line tools installed on a development machine, and have enabled USB debugging on your Android device, then you can do the following: Execute adb forward tcp:9222 localabstract:chrome_devtools_remote on your development machine. Chrome instances expose access to a debugging protocol via a Unix domain socket with the abstract address chrome_devtools_remote. The above adb command forwards requests made to port 9222 on your development machine onwards to that Unix domain socket. (You can get a list of all the Unix domain sockets on the Android device by typing adb shell cat /proc/net/unix.) Using this, we can run wget or curl (or indeed, a browser) and retrieve information from the mobile device's Chrome instance. Debugging information is provided as JSON data over HTTP. A JSON file listing the open tabs can be obtained by executing wget -O tabs.json http://localhost:9222/json/list. (Other endpoints of the API can be found at https://github.com/buggerjs/bugger-daemon/blob/master/README.md#api.) See here for more details on enabling USB debugging, and here for an overview of how it works."} +{"doc_id": 394115, "author": "A. N. Other", "text": "Why does anyone offer free advice here on Stack Exchange when some people make money answering technical questions? I think this points to a basic psychological need to be generous. Jorge Moll and Jordan Grafman, neuroscientists at NIH, have found that charity is hard-wired in the brain. See the Washington Post article "If It Feels Good to Be Good, It Might Be Only Natural" at http://www.washingtonpost.com/wp-dyn/content/article/2007/05/27/AR2007052701056.html Both Kohlberg's theory of cognitive development and Gilligan's ethics of caring view people as interdependent and developing towards increased empathy and altruism. This behavior is necessary for humanity to survive and thrive. Lewis Hyde says there are two types of economy: (1) the exchange economy (economy of scarcity), where status is accorded to those who have the most, and (2) the gift economy (economy of abundance), where status is accorded to those who give the most. Examples of gift economies include marriage, family, friendship, traditional scientific research, social networks (like Wikipedia and Stack Exchange), and, of course, F/OSS. IMHO, Eric S. Raymond and Linus Torvalds performed a miracle: transforming selfish programmers into generous programmers. This is very similar to how Elisha transformed 2,200 selfish students into generous people with the miracle of "the feeding of the multitude". In II Melachim 4:42-48 Elisha must support 2,200 students. There's a famine. His students are hungry and selfish. Each of them has some food, but they refuse to share with each other. After Elisha distributed a mere 22 loaves of bread to them, they began to share with one another. Soon, not only are they all fed, but there's food left over. The true miracle is not that bread materialized out of thin air, but that those who were once selfish became generous, inspired by the example of one person's generosity.
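Stepping back to the adb/DevTools recipe a little earlier: the wget step has a direct Python equivalent. A sketch; it assumes the adb forward command has already been run and Chrome is open on the device:

```python
import json
from urllib.request import urlopen

# Assumes: adb forward tcp:9222 localabstract:chrome_devtools_remote
with urlopen("http://localhost:9222/json/list") as resp:
    tabs = json.load(resp)

for tab in tabs:
    print(tab.get("title"), "->", tab.get("url"))
```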
Something similar has happened over the last couple decades, as a result of the release of Linux and other free software."} +{"doc_id": 164739, "author": "Mica", "text": "If you install Xposed, this module has an option to prevent the screen from waking when plugging into a charger: http://repo.xposed.info/module/com.smartmadsoft.xposed.aio"} +{"doc_id": 66439, "author": "LexS", "text": "When an app has been uninstalled, often the icons are still in Launchpad. You still can not remove it. To remove click it and it will show with an question mark: drag it to the trash then."} +{"doc_id": 394121, "author": "Antsan", "text": "There are several reasons for making software available for free. It could be, that the software is only written to produce something else - making the source free, offers the opportunity to incorporate bug fixes and features by third parties without having to pay them, while you can get money out of what you produce with that software. See The Cathedral and the Bazaar. Another reason is that you write the program for fun and/or training and getting comments on your code by peers or even more capable persons than yourself might be more important than earning money - in this case, selling the software for a profit wouldnt be profitable at all. And theres the third option of high skill linked with high self-esteem, where you take the route of Tarn and Zach Adams and make a living off the donations you get. Dwarf Fortress (programmed by Tarn Adams) is available for free, yet receives thousands of dollars in donations per month."} +{"doc_id": 66467, "author": "Asso", "text": "You need to give permission to the path. run this in command line and you will be fine to go. It worked for me: sudo chown -R $USER /usr/local"} +{"doc_id": 164789, "author": "ofir_aghai", "text": "Just share the app from Google Play Store to somewhere, (by clicking on the share button in the apps page) and see the shared value. You will see something like: https://play.google.com/store/apps/details?id=com.my.packagename (If you cant find it in Google Play: You can long press on app icon and press on Application info Go to the bottom of the Android page, and then click on App details in store)"} +{"doc_id": 33729, "author": "Othniel", "text": "Shift-Command-G in Finder brings up a Go to folder dialog. Type in the name of the directory, for example, /usr/local. Finder will show the directory. I use this with Finder in View as Columns While this doesnt give a browsable directory from the root directory down, Ive found it quite useful."} +{"doc_id": 66499, "author": "Matt Reilly", "text": "deleting it from my bookmarks worked for me. backstory: typing yah for yahoo.com autofilled yahooligans.com (safari 8.0.7 on yosemite 10.10.4). fix: found it deep in a sub menu in the bookmarks folder and erased it. now typing yah fills yahoo.com. woo hoo and thanks!"} +{"doc_id": 164803, "author": "xavier_fakerat", "text": "You could also use YouTube Vanced. YouTube Vanced is a modded version of YouTube. It is a feature enhanced version of the official YouTube app, most importantly brings the feature for background playback (as well as other equally useful features including built-in adblocking, black/dark themes, etc). It officially supports minimum (API 17) version 4.2 jellybean and works on both rooted and non-rooted devices. This page has installation instructions and download links for non-rooted and rooted devices. 
Additionally you need to install the install MicroG package (found in the download links) in order to be able to log into google account (not necessary if you dont want to log in) (Click image to enlarge)"} +{"doc_id": 66514, "author": "Conrad", "text": "(This irritating bug seems to be biting me more often after updating to 10.10.4) When this happens and I dont want to have to reboot, I only restart the Dock process and the problem is gone. Theres probably some GUI way to do it, but I just drop into Terminal and do: killall Dock But before that, run killall -s Dock to see what would be killed, to make sure its only going to directly kill the one Dock process. Once you run killall Dock, Dock will restart in a few seconds and then Mission Control is working again. You can verify that the restart happened by checking the Process ID of Dock after the restart."} +{"doc_id": 99290, "author": "user56reinstatemonica8", "text": "None of the above answers work for me in Sierra (version 10.12.2), but entering this command into the terminal does work and shows all hidden files everywhere (greyed out so you can see which are hidden by design), including /var, /usr, /etc, etc: defaults write com.apple.finder AppleShowAllFiles -boolean true ; killall Finder Note that this shows hidden files everywhere - I would consider this a good thing, but some people might not want to see all the hidden files scattered across their directories. The killall Finder bit simply restarts Finder so that the change immediately takes effect. Credit to this LifeWire article, which also recommends using this command instead if you are on OS X 10.8 or earlier: defaults write com.apple.finder AppleShowAllFiles TRUE ; killall Finder Both commands can be undone by repeating the command but replacing true (or TRUE) with false (or FALSE). The root directory looks like this after the change:"} +{"doc_id": 33773, "author": "Pyramis", "text": "I just discovered the missing solution here: http://osxdaily.com/2012/11/15/stop-software-update-mac-os-x/ You can selectively disable notifications about specific software updates you dont care about (like useless App updates) by control-clicking the update inside the App Store to reveal a hidden Hide Update option. This is best for those who want to keep Notification Center active for all its other uses (calendar alarms, etc.) and who also want to be notified about critical software updates. It should permanently mute notifications about certain software updates (until that software comes out with a new update). (Hopefully this also works for the case where non-admin users are getting update notifications they cant act on.) Screen-capture from OSxdaily article:"} +{"doc_id": 1006, "author": "Jon Hadley", "text": "Double clicking the home button in iOS4 shows a custom task bar with your recently used and still running multi-tasked apps. Click and hold to close them one by one. 
Also shows the iPod controls as before."} +{"doc_id": 1007, "author": "nanda", "text": "Clicking the home button and power button will capture your screen and put it on your Photo Albums"} +{"doc_id": 1008, "author": "nanda", "text": "Tapping the status bar will almost always bring you to the top of the scroll view."} +{"doc_id": 99313, "author": "vk.edward.li", "text": "Actually the workaround has been merged in Chromium already https://chromium-review.googlesource.com/c/chromium/src/+/801030: In macOS 10.13, if: - Chromium is the default browser - Chromium has updated - The user has not relaunched and the user tries to open a link from an external program, instead of opening the link in the running instance of Chromium, LaunchServices tries to open a second instance of Chromium. Currently, this causes the running instance to open a blank new window. So this issue has nothing to do with the App Update, before it is released in Google Chrome we must restart the application to fix it temporary. The actual root cause and proposed solution is here: https://bugs.chromium.org/p/chromium/issues/detail?id=777863#c56"} +{"doc_id": 1009, "author": "pgb", "text": "When using the keyboard, if you tap the 123 button, and slide your finger (without lifting it) to any number or punctuation symbol, when you lift your finger the keyboard will return to the alphabetic keyboard automatically. One less tap!"} +{"doc_id": 1010, "author": null, "text": "Really, really hidden features that will only appear if you chant the correct spells: Dial : **3001#12345#* * to bring up the spirit of iPhones Field Test mode (this is different from the DFU mode familair to jailbreakers), which will give you information about towers and signal strength, 3G/EDGE network and much more info which no mortal would ever need. *#06# will display your IMEI. No need to tap Call. And some spells that only works in AT&T land: *777# and tap Call. Account balance for prepaid iPhone. *225# and tap Call. Bill Balance. (Postpaid only) *646# and tap Call. Check minutes. (Postpaid only) And a few more enchantments related to call forwarding : *#21# and tap Call. Setting interrogation for call forwards. Discover the settings for your call forwarding. Youll see whether you have voice, data, fax, sms, sync, async, packet access, and pad access call forwarding enabled or disabled. And lots and lots of other spells for all occassions, phew... *#30# and tap Call. Calling line presentation check. This displays whether you have enabled or disabled the presentation of the calling line, presumably the number of the party placing the call. *#76# and tap Call. Check whether the connected line presentation is enabled or not. State whether the connected line presentation is enabled or disabled. Presumably similar to the calling line presentation. *#43# and tap Call. Determine if call waiting is enabled. Displays call waiting status for voice, data, fax, sms, sync data, async data, packet access and pad access. Each item is either enabled or disabled. *#61# and tap Call. Check the number for unanswered calls. Show the number for voice call forwarding when a call is unanswered. Also show the options for data, fax, sms, sync, async, packet access and pad access. *#62# and tap Call. Check the number for call forwarding if no service is available. Just like the previous, except for no-service rather than no-answer situations. *#67# and tap Call. Check the number for call forwarding when the iPhone is busy. And again, but for when the iPhone is busy. 
*#33# and tap Call. Check for call control bars. Check all the usual suspects (voice, data, fax, sms, etc) to see whether barring is enabled or disabled for outgoing. originally submitted by GeneQ on Super User."} +{"doc_id": 1011, "author": null, "text": "If you hold your finger down on a link in Safari then you can open the linked page in a new tab. originally submitted by John Topley on Super User."} +{"doc_id": 1012, "author": null, "text": "In 3.0, you can hold down the hyphen key on the keyboard to get an em dash character. Similarly hold down the period to get an ellipsis. You can also hold down keys such as a or e to get accented versions of those letters. originally submitted by John Topley on Super User."} +{"doc_id": 1013, "author": null, "text": "In the Mail application you can select the portion of text that you want to quote. Hit the Reply button and the selection will appear at the end of the email, instead of the whole original message. originally submitted by John Topley on Super User."} +{"doc_id": 1014, "author": null, "text": "In the maps app you dont have to pinch to zoom in and out. Double tapping zooms in and tapping with two fingers zooms out. This makes it much easier when you are using the phone with one hand. originally submitted by andynormancx on Super User."} +{"doc_id": 1015, "author": null, "text": "In 3.0, you can adjust the scrubbing speed through a song by touching the scrubbing slider, then moving your finger down on the screen. Different distances from the scrubbing bar mean different (slower) scrubbing speeds. While its not exactly private browsing, you can clear your Safari history, cookies, and cache from the Settings application in the Safari menu. The Home button (when pressed in the Springboard) by default will send you to the first screen of applications. However, if you press Home, wait for the first page of apps to be displayed, then press Home again, the Search screen will be presented. Subsequent presses will toggle back and forth between the first apps page and the Search page. (Note that you have to delay slightly between presses, otherwise the two-press shortcut will kick in and launch some other app (iPod by default)). originally submitted by Tim on Super User."} +{"doc_id": 66553, "author": "Marc Stober", "text": "Force quit did not work reliably for me in iOS 8. What did is using private mode for testing: Open the page in private mode (and bookmark it so you can open it later). When you need to refresh, close browser tab (you can stay in Safari and private mode). Open the page again (use the bookmark you created in step 1)."} +{"doc_id": 1016, "author": null, "text": "In 3.0, when editing some text, shake the phone to get the option undo your edits. To redo your edits, just shake the phone again. originally submitted by andynormancx on Super User."} +{"doc_id": 1017, "author": null, "text": "Ive just discovered that if you scroll to the top of the list of Contacts in the Phone app, your own phone number is listed above the search field. originally submitted by John Topley on Super User."} +{"doc_id": 1018, "author": null, "text": "Go to Settings->General->Keyboard, and turn on Enable Caps Lock. When typing the shift key will still behave as before, but now you will have the option to double-tap it to turn on Caps-Lock (shift key will turn blue). Useful for acronyms, and shouting. originally submitted by jwaddell on Super User."} +{"doc_id": 1019, "author": null, "text": "Special characters on the keyboard. 
Hold down E, Y, U, I, O, A, S, L, Z, C, or N to bring up accented letters Hold down $ in the number/symbol keyboard to display other currency symbols Hold down \u201c or \u2018 in the number/symbol keyboard to access alternative quote characters Hold down ? or ! in the number/symbol keyboard to bring up the \u00bf or \u00a1 symbol Hold down the .com key when entering Web addresses to bring up options for .net, .edu and .org. (You can also do this trick when entering email addresses in Mail by tapping and holding the . (period) key.) (from http://mac.elated.com/2009/01/05/15-secret-iphone-tips-and-tricks/ ) originally submitted by Herb Caudill on Super User."} +{"doc_id": 1020, "author": null, "text": "When the iPhone is locked you can still access the iPod back/play/pause/next track buttons by pressing the Home button twice - only discovered that the other day and completely by accident...! originally submitted by Mike McClelland on Super User."} +{"doc_id": 66559, "author": "mmmmmm", "text": "You can install coreutils with Macports as sudo port install coreutils This will put GNU core utils in /opt/local/bin with a g prepended e.g. gshuf More details on the package coreutils."} +{"doc_id": 1021, "author": null, "text": "You can have more than the 11 screen limit for apps. If you add them, they wont be displayed, but will still be installed on the phone. You can still get to them by using a spotlight search for the app name and launch them, even though theyre not visible. originally submitted by Mark Struzinski on Super User."} +{"doc_id": 1022, "author": null, "text": "You can double tap the shift to keep caps lock on. Tap just below the seek/scrub bar to hide/show it. originally submitted by e11world on Super User."} +{"doc_id": 1023, "author": null, "text": "You can remove individual text messages from a conversation by clicking the Edit button in the top right-hand corner of the Messages app. originally submitted by John Topley on Super User."} +{"doc_id": 1024, "author": null, "text": "Double tapping the space key ends a sentence by filling in a period. originally submitted by Sathya on Super User."} +{"doc_id": 1025, "author": null, "text": "You can play videos even when the phone is locked. Sometimes I listen to video podcasts, where all I am interested in is the audio. When doing this, if you lock the phone the podcast stops playing. However, you can start the podcast playing again even with the phone locked. Just double press the home key to bring up the iPod controls and then press play. originally submitted by andynormancx on Super User."} +{"doc_id": 1026, "author": "Brian Kim", "text": "Everyone knows about pinch to zoom in/out on Google Maps. Many people know that they can double-tap to zoom in. Did you know that you can do two-finger-tap to zoom out? So useful when driving with iPhone mounted on dash."} +{"doc_id": 1027, "author": "Josh Newman", "text": "You can listen to any audio that is being streamed through Quicktime in Safari (.mp3, .m3u, .pls etc files) in the background. Start the stream via a link in Safari.app, and then hit the home button and the audio will continue to play. 
This was very notable pre-iOS4, not sure if it worked pre iOS3 though."} +{"doc_id": 1028, "author": null, "text": "Since the screen is multitouch, you can hold the Shift key while typing other keys to type them in uppercase (just like a real keyboard)."} +{"doc_id": 1029, "author": "Josh Newman", "text": "Since the 3GS, you can use the headphones remote button to stop/start video recording"} +{"doc_id": 132119, "author": "Matthew Read", "text": "/data/init.sh runs at boot, if you have root you can edit it as you like. Be careful ;) Edit: Apparently you might need to shoehorn the edited script into the boot image as well. Info on how to do that here: http://forum.xda-developers.com/showthread.php?t=443994"} +{"doc_id": 132125, "author": "DrDro", "text": "Some malwares can escalate to root permissions even on a un-rooted phone. Like the one they just recently found in over 50 (popular) apps on the android market. http://www.pcworld.com/businesscenter/article/221247/droiddream_becomes_android_market_nightmare.html"} +{"doc_id": 99358, "author": "vaughan", "text": "I have the same problem. I gradually started noticing double key presses for few keys showing up in the text I typed. The offending keys are: i, t and e. How to reproduce I went to Keyboard > Key Repeat > Off. When I press down the i and then slide my finger from the left edge to the right edge of the key, it triggers more keypresses. If I key my finger still and press down with more pressure, it sometimes triggers more keypresses. Pressure seems to be important. If I rub lightly it doesnt happen as much. I wonder what is causing it. I hope its dust. I will clean my keyboard tonight. But seeing as they all occur in the same row, maybe its something to do with the circuitry - I have to research more about how the keyboard works. Sprayed my laptop keyboard with a lot of pressurized air. Did not help at all. the T and I keys are still consistently causing issues."} +{"doc_id": 361526, "author": "murlakatamenka", "text": "For zsh users: rm -v **/*(-@) from zsh user guide (search for broken symlinks)"} +{"doc_id": 230457, "author": "BenC", "text": "In my case, using ffmpeg directly did the trick and provided the best result: $ ffmpeg -f gif -i infile.gif outfile.mp4"} +{"doc_id": 590907, "author": "Root", "text": "You dont have to. Some older browsers and operating systems (these usually go hand-in-hand) do not have newer certificate root authorities, but they usually dont support newer HTTPS standards either, so nothing really is lost. You may have a device which doesnt support HTTPS, custom script, etc. No one can spoof HTTP, because the DNS record belongs to you and the A record points to your specific IP address (in a perfect world). You do it just to maintain compatibility, thats it."} +{"doc_id": 590910, "author": "Ronny", "text": "Why dont I just serve https only? The main reasons are the default behavior of browsers and backward compatibility. Default behavior When an end-user (i.e, without knowledge in protocols or security) types the website address in its browser, the browser uses by default HTTP. See this question for more information about why browsers are choosing this behavior. Thus, it is likely that users will not be able to access your website. Backward compatibility It is possible that some users with old systems and old browsers do not support HTTPS or more likely, do not have an up-to-date database of root certificates, or do not support some protocols. 
In that case, they either will not be able to access the website or will get a security warning. You need to decide whether the security of your end-users is important enough to force HTTPS. Many websites still listen on HTTP but automatically redirect to HTTPS and ignore users with really old browsers. Could someone spoof http://www.example.com if I don't set up HSTS? If an attacker wants to spoof http://www.example.com, they need to take control of the domain or take control of the IP address in some way. I assume you meant: could an attacker perform a man-in-the-middle attack? In that case yes, with or without HSTS: Without HSTS: An attacker can easily sit between your server and the user, and be active (i.e., modify the content) or passive (i.e., eavesdrop). With HSTS: The first time a user tries to visit the site using HTTP, an attacker could force the user to use HTTP. However, the attacker has a limited time window in which to perform the attack. What should you do? Like many websites, you should allow HTTP connections and have your server redirect the user to the HTTPS version. This way you override the default behavior of browsers and ensure your users use the HTTPS version. Old systems without the proper protocols or root certificates will not be able to access the site (or at least will get a warning), but depending on your user base this should not be an issue. Conclusion: It will do more harm than good to disable HTTP. It does not really provide more security. Any security added to protect a resource is useless if it prevents most of its users from accessing it. If your end-users cannot access your website because their browsers default to HTTP and you do not listen for HTTP connections, what is the benefit? Just perform an HTTP 301 redirection to the HTTPS version. Related questions: Why do browsers default to http: and not https: for typed-in URLs? Why is HTTPS not the default protocol? Why should one not use SSL? Why do some websites enforce lack of SSL?"} +{"doc_id": 66623, "author": "arnt", "text": "Most devices choose the strongest signal. So set the 5GHz AP(s) to maximum power and then just diminish the broadcast power of the 2.4GHz AP(s) until 5GHz is seen to be preferable. The price you pay is that any 2.4-only devices will have even more problems with congestion. But perhaps escaping congestion on most devices is worth that."} +{"doc_id": 590912, "author": "Anders", "text": "For usability reasons you need to offer a redirect to HTTPS from all HTTP URLs. Otherwise first-time visitors who simply enter example.com/some/page into the URL bar of the browser will be greeted by a connection error. Serving the redirect does not make you more vulnerable. Users who don't have your HSTS entry in their browsers will make an HTTP request anyway. Whether or not there is a real service on HTTP is irrelevant to a man in the middle. So you need to run an HTTP server, but it doesn't need to respond with anything but the redirects."} +{"doc_id": 590916, "author": "user3496510", "text": "You should support HTTP only for backward compatibility, and make sure that you do proper redirection to HTTPS in the back-end server. The best way to implement this is to provide HTTP support only for your home page or any page which does not contain sensitive information. You must not support HTTP requests to pages that users can access only after authentication.
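Anders' "HTTP server that serves nothing but redirects" is a few lines in any language. A minimal Python sketch (binding port 80 normally requires elevated privileges, and the Host header is passed through as-is):

```python
from http.server import BaseHTTPRequestHandler, HTTPServer

class RedirectToHTTPS(BaseHTTPRequestHandler):
    """Answers every HTTP request with a 301 to the HTTPS version."""
    def do_GET(self) -> None:
        self.send_response(301)
        self.send_header("Location", f"https://{self.headers['Host']}{self.path}")
        self.end_headers()

    do_HEAD = do_GET

if __name__ == "__main__":
    HTTPServer(("", 80), RedirectToHTTPS).serve_forever()
```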
Even if there are devices(IoT) are accessing your servers sensitive data, you must force them to use TLS ( many current devices can store your certificate and create TLS connection). Keep in mind the SSL versions prior to 3.0 do have many vulnerabilities such as poodlebug etc.. Hence, disable all previous version from your Web server and allow only > TLS 1.1. It is good that you implement the HSTS. I recommend you to take a look at feasibility of implementing HPKP to your site as well."} +{"doc_id": 590920, "author": "Taul", "text": "The up-voted answers are very good. Youll sacrifice usability without a major impact on security if you completely shut off HTTP. However, you can mitigate that with the HSTS Preload option. Preloading your website means you register your domain with the browser vendors and theyll hard-code their browsers to visit your website via HTTPS only. That means if a user attempts to access your website over HTTP the browser will change the request to HTTPS. They user doesnt need to first get the HSTS header before being secure. They will always connect to you over a secure channel. Now this doesnt protect users who are using browsers that havent updated their list of HTTPS only websites. Even when using preloading I recommend not shutting off HTTP for the few people who are using old or un-updated browsers. But beware, preloading is permanent! It is extremely difficult to get off the preload list. To get on the preload list: https://hstspreload.org/"} +{"doc_id": 132168, "author": "Matt", "text": "There is an app called Devide by Enterproid that may do the trick. Its meant to be for deploying in a work environment so that a user can have a work profile and a personal profile on the phone, each separate and secure with different settings and apps specific to the work or personal environment. I dont see why it wouldnt work with two different peoples profiles instead of the work vs. personal setup. I dont know about compatibility with the Xoom, or cost (its probably not cheap) and they are still in an invite only beta but its worth requesting an invite and checking it out."} +{"doc_id": 132178, "author": "AdamP", "text": "Ive used RealmBs Android Certificate Installer to great success. You simply upload your PEM encoded (.cer or .pem) file and then point your phones browser to the link that is provided. No need for a private key."} +{"doc_id": 222, "author": "Buddy Lindsey", "text": "I found a hacky way to do it after you have created the applet (Create your icns icon file - however you want) Open Applications Folder Right-click on automator script Click on view package contents Add your icon to resources folder Rename it to AutomatorApplet.icns I am sure there is a better way, but I figured out how to do it this way."} +{"doc_id": 99431, "author": "David", "text": "Unfortunately, Apple does not provide that information when you are about to delete a picture or albums. It would be nice if Apple would give us a Help option in the photos app that can also be removed if desired. Furthermore, I am not an expert on this subject but I will provide a sensible answer from my research, rather than making a comment that DOES NOT answer the question. But Hey, everybody is entitled to there opinions and comments. So here it goes: How do you eliminate a photo off your iPhone without eliminating it completely from my iCloud Photo Library and other devices? You can\u2019t, and it\u2019s a little counter-intuitive as to why. 
You have to twist your head around to how Apple thinks of cloud storage for media. When you use iCloud Photo Library, the \u201ctruth of your library is in the cloud\u2014that means that the accurate, full, current state of all your images and other media are always stored on Apple\u2019s servers, and the various places you can download or view images (Web, iOS, and OS X) are endpoints. (The only point in which that\u2019s not true is when you have images that remain to be uploaded from an endpoint.) Thus, if you delete an image in any associated app, you\u2019re telling iCloud Photo Library to remove that image from its \u201ctruth, which means to delete it from the central repository and sync that change to all endpoints. You can choose to only store optimized (low-resolution) media in iOS and OS X, but you can\u2019t delete images in one place and expect them to remain elsewhere. And you can\u2019t maintain separate local albums in Photos that aren\u2019t synced. (In OS X, you can use alternate, non-synced libraries, however, choosing them when you launch Photos. In iOS, you can use alternate photo apps that can have their own libraries, too.) If you want the ability to delete images from an iPhone or any device without deleting it from all your devices, you have to disable iCloud Photo Library and use another form of sync or another service, like Dropbox, Google Photos, or Amazon Cloud Drive. https://www.macworld.com/article/3058175/photography/the-problem-with-deleting-photos-from-your-device-with-icloud-photo-library-enabled.html"} +{"doc_id": 224, "author": "Studer", "text": "After the script has been created do the following : Find the source app with the icon you want Get Info of the source app (cmd-i) Click on the icon inside the source apps info window (Not the one in the big Preview section at the bottom, if you have one of those; the little one in the top-left corner.) Copy it (cmd-c) Get Info of the automator script (i.e., the destination app) (cmd-i) Click on the icon inside the destination apps info window Paste the icon from the clipboard (cmd-v) This method works for every files in Mac OS X."} +{"doc_id": 225, "author": "Chealion", "text": "You can also change the icon just as you would normally for any other folder or file in Mac OS X: Copy the image you want to use as your icon. Ideally it should be 512x512 already. Choose Get Info on the file/folder/app in Finder Click on the Icon and youll notice a blue outline on the icon - now press Command-V or choose Paste from the Edit menu. (Screenshot taken from Super User question 37811)"} +{"doc_id": 132207, "author": "Rogier", "text": "I am great fan of APP2SD. It comes directly with an advice which apps can be moved to your SD and if the cache can be removed!"} +{"doc_id": 66677, "author": "Sanaah", "text": "Start by typing in the website into the Safari URL bar example Gmail. Then at the bottom of the suggestions itll show you Search for gmail in History. Click that option and then select all and press delete. Worked like a charm for me."} +{"doc_id": 132218, "author": "Matthew Read", "text": "The Short Answer Theoretically, all devices that meet Androids minimum requirements can run Android, its just a matter of customizing Android for the device. The Long Answer While Android is open source and can be modified to suit many devices, firmware and hardware drivers are most often not made readily available -- especially not the source code. 
Android wont run on a device without drivers for that specific device, so this means that you cant simply compile the code for Android and run it on your phone. Android is a very different operating system than other phone platforms; Android and Windows Phone 7, for example, are just as different as Ubuntu and Windows 7 for the PC. This means that even if you have WP7 drivers for your device, those drivers wont work on Android. Youll have to modify those drivers to be compatible with Android, and you may need to reverse-engineer a lot of code. This is very difficult and time-consuming, and sometimes even a team of people have little success with it. Even getting a new version of Android to run on an Android phone can be hard. The upside is that many phone manufacturers are now putting out versions of the same device with different operating systems. One example is the HTC HD2, which runs Windows Mobile 6 but is very similar to the Android HTC Desire Z. The Desire Z ROM only requires slight modification to run on the HD2. Android ROMs/projects for non-Android Phones Various developers have gone through the effort of creating an Android ROM that can be installed on other devices, or have started to do so. The following statuses will be used to describe each project/ROM: Pre-alpha: Concept stage. You cannot use Android at all yet. Alpha: Android is technically usable, but many major features are missing. Beta: Most major Android features are usable, but its fairly buggy. Complete: This is almost as good as real Android! Unknown: Its, well ... unknown. Apple iOS Devices There used to be a project iDroid for porting Android 2.3 to a jailbroken iPhone (2G or 3G), but the project died in 2014 without ever becoming stable. Nobody seems to be working on a port for iPhone or iPad any more. Bada devices badaDroid (German site) Android on Bada (XDA; based on cyanogenmod; also see here) Android porting @BadaForums LG Devices (Proprietary OS) LG Arena / KM900 See the Arenoid project. Pre-alpha. WebOS (HP) Devices HP Touchpad Android 4.4: Touchpad Toolbox. Beta. Windows Mobile 6 Devices HTC Gene / P340x Android 1.5: See this XDA thread (Wing Linux). Alpha. Leo / HD2 Android 2.2: See XDAs guide to running Android on the HD2. Complete. Android 2.3: See various 2.3 ROMs available on XDA. Unknown. Android 4.0: NexusHD2. Complete. Tilt / Kaiser / TyTN II / MDA Vario III Android 2.1: See this XDA thread (CM5). Beta. Android 2.2: See this XDA thread. Beta. Touch CDMA / Vogue 100 / P3050 Android 2.2: See XDAs Touch CDMA - Android Development subforum. Beta (most ROMs). Android 2.3: Same subforum. Alpha. Touch Cruise / Polaris / P3650 Android 2.1: See this XDA thread (CM5). Beta. Android 2.2: Apparently Fresh Froyo may work, which is a Touch CMDA ROM. See XDAs Touch CDMA - Android Development subforum. Unknown. Touch Diamond / MDA Compact IV / P3051 / P370x Android 2.2: See this XDA thread. Beta. Touch Diamond 2 / Pure / Topaz / MDA Compact V / T5388 Android 1.6-2.2: See this XDA thread. Beta. Touch Dual / Nike / MDA Touch Plus / P5500 Android 2.2: See this XDA thread. Alpha. Touch GSM / Elf / P345x Android 1.5: See this XDA thread (Wing Linux). Alpha. Touch HD / Blackstone Android 2.2: See this XDA thread. Alpha. Touch Pro / Fuze / Raphael Android 2.2: See this XDA thread. Beta. Touch Pro 2 / Tilt 2 / Rhodium Android 2.2: See this XDA thread. Beta. Android 2.3: See this XDA thread. Alpha. Samsung Omnia II / i8000 (See also: MoDaCo thread) Android 2.1: See Omnia2Droid. Beta. Android 2.2: See Omnia2Droid. Beta. 
You may be able to boot Android from an SD card on your WinMo device as well, leaving WinMo intact on your device. This guide shows how to do so, and it works on many WinMo devices. Windows Phone 7 Devices None yet. Maemo and MeeGo (Harmattan) Devices Nokia N900 Android 2.3: See NITDroid for N900. Alpha. Nokia N9/N950 Android 4.1: See NITDroid for N9. Alpha. Symbian Devices (Nokia and Sony Ericsson) There arent currently any known successful attempts to port Android to devices originally designed for Symbian. Nokia N95 Android Port for Symbian phones blog (Gizmodo mention of that project). Unknown Other Proprietary OS Devices Samsung Jet S8000 / S8003 Android 2.3: See JetDroid. Beta. Android 4.0: See CM9. Alpha x86 Tablets/PCs The Android-x86 project develops distributions of Android for x86-based systems, such as Apple Macs, most Windows PCs and tablets. Some Android devices already have x86 SoCs, such as the Dell Venue series and the HP Slate series. Android-x86 comes with the GRUB bootloader, which allows it to be dual-booted with Windows/Linux/OS X. It can be installed to a hard drive/SSD partition during initial setup. There is no LiveCD functionality in Android-x86. It is regularly updated with the latest Android version. For more information about Android-x86 and more specific help, look at our frequently asked android-x86 questions. Feel free to add to this answer as new ROMs come out for more devices!"} +{"doc_id": 132220, "author": "Bryan Denny", "text": "Yes, you have to root your phone in order to remove preinstalled apps. See these questions here and here. Before you remove any applications, MAKE SURE YOU MAKE A BACKUP via Nandroid. You may delete an app that the system relies on (for one reason or other). Do some searching and make sure it is safe to remove the apps you want to remove."} +{"doc_id": 132221, "author": "Matthew Read", "text": "Yes, you have to be rooted. To get rooted, see this question: How do I root my Android device? One of the easiest ways to uninstall system apps after rooting is to use Titanium Backup. You can just click an app and choose Uninstall. Note: Be careful what you remove! You might want to use Titanium to back them up before you remove them, in case it makes your phone unstable or stop working. Also note that the bootloader may restore system apps on reboot. If possible, unlock your bootloader; you may also need to install a custom ROM thats deodexed, or to edit the image that is restored by the bootloader. That depends on your particular device, but with the SGS you should have no bootloader-related problems."} +{"doc_id": 33925, "author": "nwinkler", "text": "bash-it (Free) An alternative to oh-my-zsh (see above). A really useful collection of scripts, aliases and functions for bash. Also provides a nice plugin system for adding your own additions."} +{"doc_id": 197774, "author": "Mike", "text": "Here is probably the best description of echo vs printf http://www.in-ulm.de/~mascheck/various/echo+printf/ At a very high level.. 
printf is like echo but more formatting can be done."} +{"doc_id": 296078, "author": "Moustafa Elqabbany", "text": "If you dont need regular expressions, its much faster to use fgrep or grep -F with multiple -e parameters, like this: fgrep -efoo -ebar *.txt fgrep (alternatively grep -F) is much faster than regular grep because it searches for fixed strings instead of regular expressions."} +{"doc_id": 132247, "author": "eldarerathis", "text": "A .dex file is basically a compiled bytecode version of an app that has been built for the Dalvik VM which Androids apps run on. I believe it is short for Dalvik Executable, but Im not sure if Im remembering that correctly. An .odex file is an Optimized .dex file (hence the o), meaning that it has basically been pre-compiled for a specific platform. The advantage to this is that startup time is much faster since the VM doesnt have to perform any optimizing at startup/runtime. The disadvantages are 1) That it takes up some extra space and 2) An odexed app wont run properly if its put onto another device, and it must have the associated .odex file to run at all. ROMs are typically released in deodexed form because they can be themed and modified fairly easily, whereas theming/modifying an odexed ROM is basically impossible (at best things would crash like crazy). Some people also choose to release their ROMs in odexed versions for people who would prefer the performance gains. Most of the stock ROMs Ive seen are odexed, I assume because the carriers/manufacturers want the performance boost. They also dont have any kind of official theming methods, so they probably dont care if you cant change the colors of your status icons or your system text or whatnot. Plus they have the advantage of knowing what device they want their system to run on, so they can pre-compile the .odex files very easily, I imagine. As an added bonus, it makes it difficult to pull .apk files off of the device and share them with people. Edit: Since I just realized that I was not very explicit about the difference here - an odexed ROM is one where the apps have been precompiled, and thus contain associated .odex files which are external to the .apk files. A deodexed ROM is one where data that would be optimized in the .odex files has been more or less merged back into the .apk files (meaning it is not optimized but basically platform agnostic), thus the .apk files are self-contained and there are no .odex files. This is usually just done with a deodexing utility, such as smali/baksmali."} +{"doc_id": 132248, "author": "Lie Ryan", "text": "A .dex (dalvik executable) file is basically similar to Windows .exe files (except in Dalvik VMs language, instead of compiled code). Basically, .odex (optimized dalvik executable) and deodex (de-optimized dalvik executable) relates to how resource files are handled in the applications. The .odex is a .dex file that have been optimized by the dexopt program: http://www.netmite.com/android/mydroid/dalvik/docs/dexopt.html Advantage odex is faster it makes themeing easier Disadvantage deodex is slower on first startup the .odex files makes theming harder"} +{"doc_id": 197799, "author": "Gilles 'SO- stop being evil'", "text": "echo prints its argument followed by a newline. With multiple arguments, they are separated by spaces. Depending on the unix variant, the shell and the shell options, it may also interpret some escape sequences beginning with \\, and it may treat the first argument(s) as options if they start with -. 
printf interprets its first argument as a format, and subsequent arguments as arguments to the % specifiers. No newline is added unless you specify one. In the first argument, all characters except two are interpreted literally: % starts a printf specifier, and \\ starts an escape sequence (e.g. \\n for a newline character). Because different shells work differently, echo $string does not always print the specified string plus a newline. A reliable way of printing a string is printf %s $string. If you want a newline after the string, write printf %s\\n $string. In your case, assuming that blah doesnt start with a - or contain % or \\, the only difference between the two commands is that echo adds a newline and printf doesnt."} +{"doc_id": 558260, "author": "Mark", "text": "The XKCD password scheme is as good as it ever was. The security doesnt derive from it being unknown, but from it being a good way to generate memorable passwords from a large search space. If you select the words to use rather than generate them randomly, though, this advantage is lost -- humans arent good at being random. The bit about memory is poorly stated, but it is a concern: if password-stealing malware ever gets on your computer, itll sweep up everything text-like from RAM and the hard drive to use in a directed attack on your accounts."} +{"doc_id": 558267, "author": "Dick99999", "text": "Id also like to add a yes answer also, but for other reasons. Its not a good advice [in general] because of length constraints: Sites like Skype, ING, eBay, and in my country Binckbank ans KPN limit passwords to 20 characters. (That bank limit is 15, but it used 2 factor authorization) With an average length of 4.5 characters/word for a short 3000-8000 word dictionary, that allows for using 3-4 word phrases only. When using large dictionaries the average may be 6-7: 3 words only If the site insists on using a symbol and a number in the password, only 18 characters are available for the phrase. Those lengths only protect against online attacks. For Off-line attacks is depends on the key derivation and hash function, iteration counts and cracking hardware used by the site of app, whether a 3-4 word phrase offers sufficient protection."} +{"doc_id": 33981, "author": "jerrylroberts", "text": "For those who are not looking for a command line solution, you can change it under System Preferences -> Sharing -> Computer Name:"} +{"doc_id": 558269, "author": "Raestloz", "text": "No, I dont think so. The actual advice in that xkcd comic are to use mnemonics that are easy for you to remember and generate password as long as you can remember them. Those are basic password advice anywhere, and will always stand true (even the quoted Schneiers method uses these two basic facts). Indeed, the comic makes use of common English words, but your implementation doesnt have to be, nor did the comic implies that you should. Of course, the most secure passwords are totally random strings like how an MD5 string looks, and you probably can use a password manager to store all those passwords, but then what password are you going to use for that manager? 
\u00af\\ (\u30c4) /\u00af"} +{"doc_id": 361668, "author": "Jeroen Vermeulen - MageHost", "text": "For another user, tested on Ubuntu 18.04: sudo -u username bash -c ssh-keygen -f ~username/.ssh/id_rsa -N "} +{"doc_id": 558283, "author": "Tom Leek", "text": "Schneier writes this: This is why the oft-cited XKCD scheme for generating passwords -- string together individual words like correcthorsebatterystaple -- is no longer good advice. The password crackers are on to this trick. but the key to understanding what he is really after is a little further in his essay: Theres still one scheme that works. Back in 2008, I described the Schneier scheme so thats it. Ole Bruce wants to assert that his scheme is the One and Only, the best, the winner, the ultimate scheme. Therefore, he needs to say disparaging things about the competitors, regardless of whether such assertions are scientifically sound or not. In this case, it has always been assumed that the password generation method is known to the attacker. Thats the whole point of entropy computations; see the analysis. That attackers are on to this trick changes nothing at all (when an attacker knows the password generation method, the entropy computation describes exactly the password strength; when the attacker is incompetent and does not know the password generation method, the password strength is only higher, by an amount which is nigh impossible to quantify). The quip about passwords in memory is just more incoherent ramblings. Passwords necessarily go to RAM at some point, whether you type them or copy-paste them from a password safe, or anything similar. My guess is that Bruce was drunk. Update Schneier was specifically asked to comment about his passphrase condemnation in a Reddit AMA (via archive.org, original link) that took place August 2, 2016. He continued to advocate for his password creation system as a superior alternative to random word passphrases. Schneier did say his scheme gives you more entropy per memorizable character than other methods which is true when compared to characters making up words. But this is also irrelevant when a system relies on memorizing words rather than characters, and youre allowed to combine enough words to generate adequate entropy for your passphrase as a whole."} +{"doc_id": 197841, "author": "akawaguc", "text": "I recommend w3mman, the system\u2019s manual pager through the w3m command-line browser. It colors links and enables tabbing through them You can try it by installing the w3m package, which is available in most software repositories and Cygwin."} +{"doc_id": 558292, "author": "The Spooniest", "text": "It depends. One thing you need to understand is that this is not security-by-obscurity: the entropy values used in the comic assume that the attacker already knows youre using this method. If the attacker doesnt know how youre generating the passphrase, then the entropy goes up massively. The trick to the XKCD method is that you need to actually use a random number generator and a good word list: never pick the words yourself, not even randomly (in quotes because humans are actually really bad at picking things randomly, which is why you shouldnt do it). Diceware has tools to help you do this, and even takes the random element out of the computers reach by using ordinary dice. Against a broad-based attack -the sort of thing where an attacker got a list of passwords from a Website and doesnt know anything about whose passwords are in the list- this is as strong as it ever was. 
Just as you say, its strength comes from the power of exponents (and a good word list). Schneiers attack can work, but only in an entirely different context. His attack assumes that you are being specifically targeted, by an attacker who already knows a great deal about you. This might not seem especially worrisome at first, because the stereotypical determined attacker is an intelligence agent behind a screen, and most of us dont have to worry so much about those: there are only so many of them, and each one can only afford to care about so many people. But its actually more of a problem than it might first seem, thanks to the advent of sophisticated malware. A malware installation can afford to care about you even though the attacker does not, and so you still wind up facing an extremely determined attacker. Even more determined than a human could be, in fact, though far less creative. Malware that compiles information on you will give words that seem important to you very high priority in the word list. It does this because most people pick the random words themselves, but in so doing, they actually bias quite strongly toward the words that are most important to them: it may still feel random, but some words are much more likely to come up than others. For that reason, giving these words high priority often results in relatively quick hits, and this is the trick that Schneier is talking about. However, you can still thwart Schneiers attack by using real randomness. The catch is that this requires discipline: all decisions about what words to use in your passphrase (aside from choosing a good word list) must be taken completely out of your hands. This is where things like Diceware can help you."} +{"doc_id": 623829, "author": "Ron Trunk", "text": "The terms are usually used in the context of data centers. Generally speaking, east-west traffic refers to traffic within a data center -- i.e. server to server traffic. North-south traffic is client to server traffic, between the data center and the rest of the network (anything outside the data center). I believe the terms have come into use from the way network diagrams are typically drawn, with servers or access switches spread out horizontally, and external connections at the top or bottom."} +{"doc_id": 66773, "author": "picciano", "text": "We just submitted our first Beta Review request and were approved in 50 minutes. The app is fairly large and complex. It appears that the Apple testing was very minimal. Edit to add: We just release a second version for external beta testing and it was instantly approved; no review required."} +{"doc_id": 558296, "author": "Thomas Pornin", "text": "Theoretically you can put anything you want in a certificate; for instance, this certificate actually contains a video file as Subject Alt Name (surprisingly, Windows has no trouble decoding a 1.2 MB certificate -- but it does not show the video, alas). However, in practice, certificates for SSL just contain the intended server name, as specified in RFC 2818. The client (Web browser) will verify that the name from the URL indeed appears where it should in the certificate. There is no standard for storing a port number in the certificate, and no client will verify the presence of that port number anyway, so, in short words: certificates are not port-specific. The notion of identity that certificates manipulate and embody does not include the notion of port."} +{"doc_id": 263385, "author": "lese", "text": "Yes it is case sensitive. 
Im not able to bring technical informations, Ive just tested it, and wondering why you didnt(?) my local machine is linux mint as you can see: # cat /etc/*release DISTRIB_ID=LinuxMint DISTRIB_RELEASE=17.2 DISTRIB_CODENAME=rafaela DISTRIB_DESCRIPTION=Linux Mint 17.2 Rafaela NAME=Ubuntu VERSION=14.04.3 LTS, Trusty Tahr ID=ubuntu ID_LIKE=debian PRETTY_NAME=Ubuntu 14.04.3 LTS VERSION_ID=14.04 HOME_URL=http://www.ubuntu.com/ SUPPORT_URL=http://help.ubuntu.com/ BUG_REPORT_URL=http://bugs.launchpad.net/ubuntu/ cat: /etc/upstream-release: Is a directory and Ive tried to connect to CentOS server like this: \u00b7 Using (wrong) Uppercase username: 8D prova # ssh Root@agora-server Root@agora-servers password: Permission denied, please try again. Root@agora-servers password: Permission denied, please try again. Root@agora-servers password: \u00b7 Using correct username: 8D prova # ssh root@agora-server root@agora-servers password: Last login: Fri Oct 2 01:50:13 2015 from 192.168.0.31 [root@agora-server ~]#"} +{"doc_id": 263396, "author": "Lambert", "text": "In case of local accounts the username is case sensitive. When you use LDAP, it depends. Ive seen cases where the username is case sensitive (on a ZFS appliance connected to LDAP) and cases where it does not matter like Solaris LDAP client connected to Windows AD. What you should/could try is to see whether your system is using LDAP correctly by issuing getent passwd . Using this command should give you a record with the username, home directory and shell for the specified user. If you do not see such record, LDAP is not configured correctly. There are several places where you should configure LDAP and one of the places is: /etc/nsswitch.conf passwd: files ldap group: files ldap You also need to check if PAM is configured correctly and maybe the most important step is to verify if the LDAP client is configured and working. Try a tool like ldapsearch to check if LDAP can be queried. There are several LDAP cookbooks available and most of them depend on the Unix version and LDAP version you are using. Update your question with those details if you need further assistance. Also include your configuration setup (without passwords of course) which can help forum members to analyse your particular issue."} +{"doc_id": 263398, "author": "jlliagre", "text": "Just like hostnames and domain names, the username is not strictly a Unix thing but can and often does span a wider range of OS types. Whether they will be considered case sensitive depends then on the standard used to specify them. Hostnames and domain names are clearly case insensitive by the DNS standard (see RFC4343). Usernames stored on a local backend (/etc/passwd) or a Unix style one (NIS) are not case insensitive by the POSIX standard. Usernames stored in an LDAP or an Active Directory backend will follow the used attribute schema definition, uid and cn which are often storing the user name have a differing schema attributes, case insensitive for the former but case sensitive for the latter. That means both Abc and abc might match or not abcs entry depending on the ldap server configuration. 
Due to this inconsistency, I would recommend to only use lowercase for both usernames and host/domain name and then avoid ssh ABC@SERVERNAME.DOMAIN.COM which is rude anyway."} +{"doc_id": 328938, "author": "bil", "text": "Riffing off of Prabhats question above, I had this issue in macos high sierra when I stranded an encfs process, rebooting solved it, but this ps -ef | grep name-of-busy-dir Showed me the process and the PID (column two). sudo kill -15 pid-here fixed it."} +{"doc_id": 66797, "author": "Alex", "text": "There is a wonderful tool Karabiner that can help you with this (and many more): Properties->Misc&Uninstall -> private.xml Screensaver Lock screen (Ctrl-Cmd-L) myconfig.lock __KeyToKey__ KeyCode::L, ModifierFlag::CONTROL_L | ModifierFlag::COMMAND_L, KeyCode::VK_OPEN_URL_APP_ScreenSaverEngine Then activate the Screensaver Lock screen (Ctrl-Cmd-L) in the Change Key screen. Update [1 Feb 2018]: The Karabiner was deprecated and replaced with Karabiner-elements. So achieving this may be as simple as mapping the MAC POWER button to some button on the keyboard. I use PAUSE button for that. Then locking screen would be Ctrl-Shift-Power and suspending - Cmd-Opt-Power."} +{"doc_id": 197879, "author": "polemon", "text": "Id advise to make a named pipe (mkfifo) and then write to that file. Then, read from it. You can always do that with things like tail, to minimize output, etc. Whenever you clear the pipe (read from it), it gets cleared, so the output is not preserved. The other option would be to write everything to a file (much like a logfile) and then analyze it an any time. This would be the preferred action, if you want to preserve all output."} +{"doc_id": 558329, "author": "nandin", "text": "Actually in this case, the origin of Google analytic script is a.com Quote from JavaScript: The Definitive Guide: It is important to understand that the origin of the script itself is not relevant to the same-origin policy: what matters is the origin of the document in which the script is embedded."} +{"doc_id": 99578, "author": "Chris", "text": "There is not a dark mode for preview. You can use the flux software to at least bring the color warmth up and brightness down. It makes the whole OS easier on the eyes at night. In another hand, there is iBooks which, I think, have a dark mode."} +{"doc_id": 427260, "author": "Mark Ransom", "text": "Theyre usually not truly random, but are called pseudo-random because they generate a number sequence that appears random. This is done with some interesting mathematical formulas. One of the most common is the Linear Congruential Generator. Pseudo-random numbers do have one useful property that true random numbers dont: if you use the same seed when you start you will get back an identical sequence. This can be very handy for testing."} +{"doc_id": 99583, "author": "Daniel", "text": "While there is not an inverted color scheme for Preview, you can invert the screen colors for the entire system by pressing Command-Option-Control-8 Press the sequence again to restore the default color scheme."} +{"doc_id": 263423, "author": "Falsenames", "text": "The user names are definitely case sensitive. You can easily test this by adding two users with similar names: ~ # useradd foobar ~ # useradd fooBar ~ # grep ^foo /etc/passwd foobar:x:1001:1001::/home/foobar:/bin/sh fooBar:x:1002:1002::/home/fooBar:/bin/sh This question/answer shows how to compensate for someone trying to log in with the a username that has the wrong case according to the LDAP servers. 
But note that this will only work if the usernames are all listed as lowercase (or you can make them all uppercase if you want)."} +{"doc_id": 99591, "author": "George", "text": "See if this works: Unplug ALL USB devices, have your wireless keyboard and mouse turned on, and restart. The system sees that there are no input devices and searches for them. Then asks you to type a passcode and hit return. If you have the Apple Keyboard 2 which has the Lightning port on the back of the keyboard, all you need to do is connect the USB -> Lightning cord to the computer and it will just like a normal wired keyboard."} +{"doc_id": 197900, "author": "Gilles 'SO- stop being evil'", "text": "If all you want to do is spy on the existing process, you can use strace -p1234 -s9999 -e write where 1234 is the process ID. (-s9999 avoids having strings truncated to 32 characters, and write the system call that produces output.) If you want to view only data written on a particular file descriptor, you can use something like strace -p1234 -e trace= -e write=3 to see only data written to file descriptor 3 (-e trace= prevents the system calls from being loged). That wont give you output thats already been produced. If the output is scrolling by too fast, you can pipe it into a pager such as less, or send it to a file with strace -o trace.log \u2026. With many programs, you can divert subsequent output with a ptrace hack, either to your current terminal or to a new screen session. See How can I disown a running process and associate it to a new screen shell? and other linked threads. Note that depending on how your system is set up, you may need to run all these strace commands as root even if the process is running under your user with no extra privileges. (If the process is running as a different user or is setuid or setgid, you will need to run strace as root.) Most distributions only allow a process to trace its children (this provides a moderate security benefit \u2014 it prevents some direct malware injection, but doesnt prevent indirect injection by modifying files). This is controlled by the kernel.yama.ptrace_scome sysctl."} +{"doc_id": 427286, "author": "SF.", "text": "Are you asking for Pseudorandom or Random? Others answered about pseudorandom, let me talk about Random. There were (are?) actual hardware-based Random Number Generators in sale. They were based on a chip with a small radio measuring white noise of deep space radiation, or a small radioactive sample and measuring periods between its decay. The problem with them was the bandwidth - the amount of entropy they could generate wasnt very high so they were used for seeds of pseudorandom algorithms. They were used in bank systems, high-security and the likes. OTOH, if you meet any embedded systems developer, they will laugh at these. For common purposes in programming a microcontroller, reading low 4 bits of any 16-bit Analog-Digital Converter woth a floating (unconnected) pin will produce a perfectly good random noise, at more than sufficient bandwidth (the shorter the polling period the more noisy the readout), and easier than writing actual RNG routine. And considering ADCs are commonly found implemented in silicon of microcontrollers, commonly implemented, and often implemented with 8 channels from which you need maybe 5 for your application, its practically free. And even if you dont have an ADC, couple of elements connected to a digital GPIO pin will produce a pretty good noise. 
In embedded, noise is ever-present (and constantly fought), and so obtaining some true randomness is very easy."} +{"doc_id": 460059, "author": "AxelS", "text": "Upstream = up the well (the origin of the software) Downstream = downwards to the ocean (of use cases) Open Source developers who are downstream build applications and tools on the finished products. Those who are upstream actually work on the products to be released. If you develop software: you depend on the stability of the upstream development. And those who work on your software are downstream developers."} +{"doc_id": 558370, "author": "PiTheNumber", "text": "Yes, you should be worried. You should contact the hotel staff, and you should not use the network any more. It is likely the router\u2019s DNS is manipulated. It is possible that the hotel wants to make some money on the side by injecting ads. However, this script looks evil. It tries to open a dialog that tricks you into installing a trojan by displaying a message that is supposed to look like a Windows update: http://www.reduxmediia.com/apu.php?n=&zoneid=5716&cb=3394654&popunder=1&direct=1 Update The answer is not really worth so many up-votes. So let me add some more about what the script is doing. It opens an iframe to storage.com and uses postMessage to store/query data with the ID 90e79fb1-d89e-4b29-83fd-70b8ce071039. These ads, besides the ad above, mentioned popup, it also loads JavaScript from: http://www.adcash.com/script/java.php?option=rotateur&r=274944 Also, there is something happening with keywords and an msn.com search query on mousedown."} +{"doc_id": 361764, "author": "Gabriel Staples", "text": "Just use time [any command]. Ex: time sleep 1 will sleep for a real time (ie: as timed by a stop watch) of ~1.000 to ~1.020 sec, as shown here: $ time sleep 1 real 0m1.011s user 0m0.004s sys 0m0.000s What a beautiful thing. You can put any command after it, and it outputs the result in a nice, human-readable form. I really like to use it for timing builds. Ex: # time your make build time make # time your Bazel build time bazel build //path/to/some:target ...or for git operations which can potentially be really long, so I can develop realistic mental expectations: # time how long it takes to pull from a massive repo when # Im working from home during COVID-19. NB: `git pull` # is sooooo much slower than just pulling the one branch # you need with `git pull origin `, so just fetch # or pull what you need! time git pull origin master For more-customized timing needs where you may need to manipulate the output or convert it to other forms, in bash, use the internal $SECONDS variable. Heres a demo, including converting these seconds to other units, such as floating point minutes: Note that dt_min gets rounded from 0.01666666666... (1 second = that many minutes) to 0.017 in this case since Im using the printf function to round. The sleep 1; part below is where youd call your script to run and time, but Im just sleeping for 1 second instead for the sake of this demo. 
Command: start=$SECONDS; sleep 1; end=$SECONDS; dt_sec=$(( end - start )); \\ dt_min=$(printf %.3f $(echo $dt_sec/60 | bc -l)); \\ echo dt_sec = $dt_sec; dt_min = $dt_min Output: dt_sec = 1; dt_min = 0.017 Related: Read more about bc and printf in my answer here: https://stackoverflow.com/questions/12722095/how-do-i-use-floating-point-division-in-bash/58479867#58479867 I dont remember where I first learned about the time command anymore, but it may have been from @Trudberts answer right here."} +{"doc_id": 558382, "author": "user51945", "text": "Without looking at the code: Yes, you should be worried! Nobody should tamper with your internet traffic, as this opens many possible threat scenarios. Even if you try to open any page and it is showing a page instead that is asking for the WiFi credentials this is impossible, as the router has first redirected your DNS query and then pretends to be the server you were trying to reach. The best way when you are using other peoples networks is to open a secured tunnel to a trusted server (OpenVPN, SSH) and to only use this."} +{"doc_id": 132404, "author": "Amanda", "text": "CyanogenMod has a good definition of ROM (and a lot of the other terms on that list) Read Only Memory. In the context of an Android device, ROM is the internal flash memory where the core operating system resides. It can also refer to a specific version firmware that can be applied to a device through a process usually referred to as flashing. An improperly flashed ROM can often brick the device, rendering it unusable."} +{"doc_id": 34111, "author": "pldg", "text": "Actually, something like pmset -g log|grep -e Sleep -e Wake is what really gives me a clean timeline of sleep/wake events on 10.8.2. powerd does not log anything about it, at least on my system (10.8.2, MacBook Pro Retina 15). 
02/03/13 19:48:37 GMT-03 Sleep Maintenance Sleep Sleep: Using AC (Charge:99%) 26 secs 02/03/13 19:49:03 GMT-03 Sleep Maintenance Sleep Sleep: Using AC (Charge:99%) 27 secs 02/03/13 19:49:30 GMT-03 Sleep Maintenance Sleep Sleep: Using AC (Charge:99%) 26 secs 02/03/13 19:49:56 GMT-03 Sleep Maintenance Sleep Sleep: Using AC (Charge:99%) 26 secs 02/03/13 19:50:22 GMT-03 Sleep Maintenance Sleep Sleep: Using AC (Charge:99%) 26 secs 02/03/13 19:50:48 GMT-03 Sleep Maintenance Sleep Sleep: Using AC (Charge:99%) 26 secs 02/03/13 19:51:14 GMT-03 Sleep Maintenance Sleep Sleep: Using AC (Charge:100%) 1802 secs 02/03/13 20:39:17 GMT-03 Sleep Maintenance Sleep Sleep: Using BATT (Charge:100%) 244 secs 02/03/13 20:43:21 GMT-03 Sleep Maintenance Sleep Sleep: Using AC (Charge:100%) 51 secs 02/03/13 21:07:17 GMT-03 Sleep Maintenance Sleep Sleep: Using BATT (Charge:100%) 242 secs 02/03/13 21:11:19 GMT-03 Sleep Maintenance Sleep Sleep: Using AC (Charge:100%) 1103 secs 02/03/13 21:29:42 GMT-03 Wake Wake due to EC.LidOpen/Lid Open: Using AC (Charge:100%) 03/03/13 00:00:26 GMT-03 Sleep Idle Sleep Sleep: Using BATT (Charge:85%) 96 secs 03/03/13 00:02:02 GMT-03 Sleep Maintenance Sleep Sleep: Using AC (Charge:85%) 38 secs 03/03/13 00:02:40 GMT-03 Wake Wake due to EHC1/HID Activity: Using AC (Charge:85%) 4338 secs 03/03/13 01:14:58 GMT-03 Sleep Clamshell Sleep to DarkWake: Using AC (Charge:100%) 48382 secs 03/03/13 14:41:20 GMT-03 Wake DarkWake to FullWake due to HID Activity: Using AC (Charge:100%) 728 secs 03/03/13 14:53:28 GMT-03 Sleep Clamshell Sleep to DarkWake: Using AC (Charge:100%) 415 secs 03/03/13 15:00:23 GMT-03 Wake DarkWake to FullWake due to HID Activity: Using AC (Charge:100%) 718 secs 03/03/13 15:12:21 GMT-03 Sleep Clamshell Sleep to DarkWake: Using AC (Charge:100%) 156 secs 03/03/13 15:14:57 GMT-03 Wake DarkWake to FullWake due to HID Activity: Using AC (Charge:100%) 834 secs 03/03/13 15:28:51 GMT-03 Sleep Clamshell Sleep to DarkWake: Using AC (Charge:100%) 378 secs 03/03/13 15:35:09 GMT-03 Wake DarkWake to FullWake due to HID Activity: Using AC (Charge:100%)"} +{"doc_id": 1349, "author": "Kevin Yap", "text": "You can use InsomniaX. It sits in your menu bar, and upon activating it, your laptop wont sleep if you close it."} +{"doc_id": 394580, "author": "JBRWilkinson", "text": "The biggest difference is the design of the UI. A good GUI can make or break an application. Mac fans would draw attention to the beautifully designed GUIs of the average Mac OS X app and theyve got a point, but this isnt a technology issue - its a design/ethos/usability issue. As for technical issues, in no particular order: The user can do anything they want in any order at any time, unlike console program in which youre either asking for input or telling them the output. You cannot assume that theyll follow the order you hope, unless you enforce the workflow Wizard-stylee. As already mentioned, events play a big part in this, and you can get multiple events happen while youre servicing the last one, so you cant really construct your state based on the current event. Use closures or a similar mechanism to maintain context across different events. In a console app, your FSM is usually self-contained around the get input, process input, update output loop. There isnt the same kind of structure in GUI programming - the main is a re-entrant event-driven thing, often a ginormous switch() statement. You need to consider different screen sizes/resolutions and allow the GUI to resize from 800x600 up to the users monitor maximum. 
You need to consider different input strategies - mouse, keyboard, touch, etc. Some technologies come for free (Mouse-wheel scrolling), others require some integration work (Ink). Accessibility - a GUI is much more suitable for less able users who have restricted vision, hearing, motor skills or cognitive skills. A ding noise is nice and obvious compared to a cryptic error message on the console. Internationalization - im assuming your console app is US/ANSI only, but when you get into GUI, you can have language/resource packages that can target other languages and regions with no change to the coding, if you prepared for it from the start. For example, no hard-coded language strings in the code - everything as resource lookups. You have lots more options for implementation technology - web-based, various GUI kits, Flash/WPF, etc. Use of colour and animation. Console programs are generally monochromatic and dont animate much. Many modern GUI frameworks provide themed widgets and have move/size/show/hide animation effects, often for free. Graphics. Console apps sometimes use ASCII art for diagrams, but a GUI app gives you full graphical ability. Lovely art can make a big difference too."} +{"doc_id": 263517, "author": "Victor Klos", "text": "Alternatively you could use STUN which was invented to answer this question in an automated way and is used extensively in internet communications e.g. by SIP and WebRTC. Using a stunclient (on debian/ubuntu do apt-get install stuntman-client) simply do: $ stunclient stun.services.mozilla.com Binding test: success Local address: A.B.C.D:42541 Mapped address: W.X.Y.Z:42541 where A.B.C.D is the IP address of your machine on the local net and W.X.Y.Z is the IP address servers like websites see from the outside (and the one you are looking for). Using sed you can reduce the output above to only an IP address: stunclient stun.services.mozilla.com | sed -ne s/^Mapped address: \\(.*\\):.*$/\\1/p For an alternative STUN lookup using nothing but basic command line tools see my answer on AskUbuntu (intended as a fun exercise, not for production use)."} +{"doc_id": 312893, "author": "nyxee", "text": "I recommend using pigz(Parallel Implementation of GZip) tar -cvf - dir | pigz -9 > /path/to/dir.tar.gz"} +{"doc_id": 132452, "author": "thunsaker", "text": "Update: Google Play Gift Cards are now widely available in many countries. Thanks to @Chahk for reminding me to update. Amazon has released their own Android Appstore which allow you to send Amazon gift cards that are redeemable for Android Apps. This requires a few extra steps, but should let you gift what you want to until this function gets added to the original Android Market Google Play Store."} +{"doc_id": 34161, "author": "Kirk McElhearn", "text": "You can also do this in Terminal. Go to the directory where you want to create the file, then run the following: touch file.txt Or redirect nothing to a text file > file.txt"} +{"doc_id": 623986, "author": "psniffer", "text": "The native VLAN is only relevant to 802.1q, yes it is untagged by default, but can be tagged if required. Ports will be assigned to the native VLAN if no other config is present. Its ok to keep it as VLAN 1, but it can be changed, you just have to remember to shut down unused ports. What I mean by this, if I plugged my evil hacker laptop into a working port that was left as VLAN 1, I could potentially be able to span across your entire network, whereas you could change the native VLAN to VLAN 10, and then not assign VLAN 10 to any ports. 
ISL has no concept of a native VLAN."} +{"doc_id": 623987, "author": "Eddie", "text": "This is an often confused point for people new to the Networking, in particular to people coming up the Cisco track, due to Ciscos over emphasis on this point. It is more or less just a terminology thing. Let me explain. The 802.1q standard defines a method of tagging traffic between two switches to distinguish which traffic belongs to which VLANs. In Cisco terms, this is what happens on a trunk port. Ive seen other vendors refer to this as a tagged port. In this context, it means the same: adding an identifier to frames to indicate what VLAN the frame belongs to. Terminology aside, the main thing to keep in mind is a VLAN tag is necessary, because often the traffic traversing two switches belongs to multiple VLANs, and there must be a way to determine which 1s and 0s belong to which VLAN. But what happens if a trunk port, who is expecting to receive traffic that includes the VLAN tag, receives traffic with no tag? In the predecessor to 802.1q, known as ISL (cisco proprietary, but archaic, no one supports it anymore, not even Cisco), untagged traffic on a trunk would simply be dropped. 802.1q however, provided for a way to not only receive this traffic, but also associate it to a VLAN of your choosing. This method is known as setting a Native VLAN. Effectively, you configure your trunk port with a Native VLAN, and whatever traffic arrives on that port without an existing VLAN tag, gets associated to your Native VLAN. As with all configuration items, if you do not explicitly configure something, usually some sort of default behavior exists. In the case of Cisco (and most vendors), the Default Native VLAN is VLAN 1. Which is to say, if you do not set a Native VLAN explicitly, any untagged traffic received on a trunk port is automatically placed in VLAN 1. The trunk port is the opposite (sort of) from what is known as an Access Port. An access port sends and expects to receive traffic with no VLAN tag. The way this can work, is that an access port also only ever sends and expects to receive traffic belonging to one VLAN. The access port is statically configured for a particular VLAN, and any traffic received on that port is internally associated on the Switch itself as belonging to a particular VLAN (despite not tagging traffic for that VLAN when it leaves the switch port). Now, to add to the confusing mix. Cisco books will often refer to the default VLAN. The Default VLAN is simply the VLAN which all Access Ports are assigned to until they are explicitly placed in another VLAN. In the case of Cisco switches (and most other Vendors), the Default VLAN is usually VLAN 1. Typically, this VLAN is only relevant on an Access port, which is a port that sends and expects to receive traffic without a VLAN tag (also referred to an untagged port by other vendors). So, to summarize: The Native VLAN can change. You can set it to anything you like. The Access Port VLAN can change. You can set it to anything you like. The Default Native VLAN is always 1, this can not be change, because its set that way by Cisco The Default VLAN is always 1, this can not be changed, because it is set that way by Cisco edit: forgot your other questions: Also, can it / should it be changed? This is largely an opinion question. I tend to agree with this school of thought: All unused ports should be in a specific VLAN. All active ports should be explicitly set on to a particular VLAN. 
Your switch should then prevent traffic from traversing the uplink into the rest of your network if the traffic belongs on VLAN1, or the VLAN you are using for unused ports. Everything else should be allowed up the uplink. But there are many different theories behind this. As well as differing requirements which would prevent having such a restricted switch policy (scale, resources, etc). For instance, if a switch is going into part of a network that is only one VLAN and its not VLAN 1, is it possible to make the default / native VLAN on all ports a particular VLAN using one global command, or is the preferred method to make all ports access ports and set the access VLAN to 10 on each of them? You can not change the default Cisco configurations. You can use the interface range to put all ports in a different VLAN in one go. You dont really need to change the Native VLAN on the uplink trunk, so long as the other switch is using the same Native VLAN. If you really want to spare the switch from adding the VLAN Tag, you could get creative and do the following (although, its probably not recommended). Leave all access ports in the VLAN1. Leave the Native VLAN at its default (VLAN1). On the uplink switch, set the port as a trunk port. And set its Native VLAN to the VLAN you want the lower switch to be a part of. Since the lower switch will send traffic to the upper switch untagged, the upper switch will receive it and associate it with what it considers the Native VLAN."} +{"doc_id": 132471, "author": "Amanda", "text": "Ive been using F-Droid, which is a smallish repository focused on free and open source tools. Not all encompassing by any means but one of the few that is explicit about showing you the license before you download an app. Edit by Izzy: F-Droid is a small, but superb alternative to Google Play. Though it offers only about 1.200 apps1 in its main repository, they are mostly high quality. In comparision, these are some advantages it has over the Playstore: Apps are OpenSource, which means they are all free (to support the devs, there are donation buttons on each apps page) F-Droid compiles the packages from the sources. This adds a level of security, as it makes sure nothing was sneaked into. The draw-back is, you cannot simply cross-update apps between the F-Droid main repo and other sources. Compiling the sources themselves, they also often throw out binary blobs and other unfree material (i.e. included modules from proprietary sources). In many cases, this means the advertisement stuff being removed. This again increases security (and privacy). But in some cases it has the draw-back of some functionality being broken, if the removed part was e.g. stuff enabling specific Google services, such as maps. Next to its main repository, F-Droid offers several others2. Developers can provide their own channel, e.g. for Beta versions. A nice way also for companies or organizations to keep together groups of apps their members are supposed to use. F-Droid is very clear when it comes to anti-features such as ads. If an app in any way deviates from the OpenSource idea, thats pointed out straight in colored boxes you cant miss when browsing the apps pages. Quoting bgvaughan from the comments: f-droids Preferences allow you to screen apps by various traits: whether they contain advertising, track your activity, or promote non-free add-ons, among others. 
While I miss the lack of user ratings on f-droid, and f-droid app versions sometimes trail behind those on Google Play, the fact that I can be sure that anything on f-droid is free, open-source, and not a freaking PITA, is more than enough to recommend its use. I can also only approve what Nicolas says about F-Droid in his comment: F-Droid apps are guaranteed open source, ad-free, tracking-free. That means smaller apps that dont waste your bandwidth for anti-features. F-Droid also offers its own Android client, the F-Droid Application Manager: F-Droid App (source: AndroidNext; click for larger variant) More details on F-Droid can also be found at Wikipedia. Interesting fact: F-Droid is a fork of Aptoide3. 1 as of 06/2014 2 see also Broams and daithib8s comments below \u2013 and this list 3 see Wikipedia: F-Droid History"} +{"doc_id": 34172, "author": "Richard Fuhr", "text": "There are two useful utilities that you can download and install that will enable you to create a new text file (or RTF file) in a currently-open folder that you are viewing using the Finder. The utilities are called NewTextFileHere and NewRTFHere and can be downloaded from http://mac.softpedia.com/developer/Jonas-Wisser-37498.html Icons for either of these apps can then be included on all of your Finder windows."} +{"doc_id": 623997, "author": "Abhishek Balaji R", "text": "An IP address can be broken down into two parts, namely the Network Address and Host Address. In your example, /29 refers to the Subnet Mask which means the 29 bits of the total 32 bits from MSB to LSB are all 1s, i.e., 255.255.255.248 . This will allow for 8 addresses per subnet. The first address in the subnet, 178.18.230.120 is the Network Address in this case and - the last address, which is all 1s - i.e., 172.18.230.127 is the Broadcast Address. The host address can range anywhere between the Network and Broadcast Address, i.e., 172.18.230.121 to 172.18.230.126 . You can arrive at these addresses by performing an logical AND on the IP address and the Subnet Mask. The first IP address is usually reserved for the Network address and the last one in that block is for the Broadcast address, while the remaining ones can be used as Host addresses. Loopback address is a special reserved IP address, usually referred to as the localhost - 127.0.0.1 and is used to communicate with the localhost. Edited: You can convert IP addresses into the binary form and perform logical AND. But, its much easier and less time consuming to have them in Decimal representation. 178.18.230.127 (AND) 255.255.255.248 -------------------- **178.18.230.120** -----> **Network Address** -------------------- 178.18.230.127 (AND) 255.255.255.255 -------------------- **178.18.230.127** -----> **Broadcast Address** -------------------- Keep in mind that the first and last one in a block are always Network and Broadcast Addresses(reserved). 
Host Addresses: Any other address lying between the Network and Broadcast Addresses is host-addressable, which means it can be assigned to a host and is NOT reserved for any specific purpose, unlike the Network and Broadcast Addresses."} +{"doc_id": 34184, "author": "Nick T", "text": "Apparently, for Windows running natively on Apple hardware, fn + return creates an Insert key press."} +{"doc_id": 165272, "author": "user2021355", "text": "Issue: adb and fastboot modes for the same Android device are recognized as separate devices under Windows. Scenario: the device is visible with adb devices but is not detected with fastboot devices, and the other suggestions here don't work; you still end up with waiting for device on Windows and aren't able to choose a driver to install via rahul pandey's answer using the Google USB driver. Possible Solution: manually update the driver for your device in Device Manager while it's connected in fastboot mode. Steps: Download and install the Android USB drivers for your device, for example, Samsung drivers for Samsung phones or the (venerable) Google Galaxy Nexus. Reboot the device into fastboot mode using adb reboot bootloader or by pressing the Volume Up + Volume Down + Power buttons simultaneously. Open Device Manager. Unplug/plug in your Android device so you can easily find your unrecognized device in the list. Under Other Devices, find your Android phone with a yellow triangle. Right-click on it and select: Update driver > Browse my computer for driver software > Let me pick from a list of available drivers on my computer. Look for the driver you just installed and select it. For my Google Galaxy Nexus it was Samsung Android ADB Interface. With any luck that will solve your problem. Now you can use the bootloader unlock method for your device (or, in my case, flash an OEM ROM). Thanks to: https://android.stackexchange.com/a/106468/52235 for pointing me in the right direction."} +{"doc_id": 132512, "author": "Matthew Read", "text": "No one pays or gets paid. According to the original version of Amazon's developer agreement (see section 2(a)) a developer received at least 20% of the price the developer set for the app, regardless of the price Amazon actually sold it for. But they changed the agreement, adding the sentence No Royalty is payable for Apps with a List Price of $0.00."} +{"doc_id": 329121, "author": "Ankur Jain", "text": "I am using it like this in my .bash_profile and .bashrc: vi .bash_profile alias dirs='dirs -v' source d.sh :wq vi .bashrc alias dirs='dirs -v' :wq vi d.sh pushd ~/Documents/GIT/seiso-data pushd ~/Documents/GIT/ewe/EosRegistry pushd ~/Documents/GIT_LODGING/site-cookbooks pushd ~/Documents/CHEF_LODGING pushd . :wq It helps me jump between the most recently used directories on my terminal. :-) Hope it helps. Rather than popd, I use cd ~<stack number>."} +{"doc_id": 624038, "author": "Ricky", "text": "IGMP Snooping is a feature for switches to learn what multicast groups are needed on which ports. Routers not handling multicast routing don't care. That said, without an mrouter in the network, you need to configure one (or more) IGMP queriers. This ensures group membership reports are flooded through the network periodically to keep the forwarding information up-to-date; otherwise it expires and traffic is either dropped, or floods everywhere. The benefit is that multicast traffic goes only where it belongs. If nothing on that port has joined that group, the switch will prune it.
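As a companion to the IGMP snooping explanation above, here is a hedged Python sketch of what joining a group looks like from a host: the IP_ADD_MEMBERSHIP socket option below is the call that makes the kernel emit the IGMP membership report a snooping switch learns from. The group and port are arbitrary example values:

import socket
import struct

GROUP, PORT = "239.1.1.1", 5000  # example multicast group and UDP port

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(("", PORT))
# This setsockopt is the moment the kernel sends an IGMP membership report,
# which is exactly what a snooping switch uses to decide where to forward.
mreq = struct.pack("4s4s", socket.inet_aton(GROUP), socket.inet_aton("0.0.0.0"))
sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
data, sender = sock.recvfrom(1500)  # blocks until traffic for the group arrives
print(sender, data[:64])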
In a cascaded environment, that can significantly reduce inter-switch traffic. And likewise keep access links clear of traffic hosts dont want. If you dont know its enabled, situations may arise wherein traffic may appear to be missing on the wire, because its being pruned. (fast-leave, no querier, no joins, etc.) Thus, its rarely enabled by default."} +{"doc_id": 198058, "author": "Johan", "text": "You could use the system clip board with *y and *p instead of the normal y and p."} +{"doc_id": 460204, "author": "JeffO", "text": "Since your manager knows it will probably fail, youre better off than most. I would consider working with the manager and see if there are any parts/features of the app that can be excluded. Too often we think every client request is a deal killer and go out of our way to promise the delivery. Until someone works with the client and probes deeper, you may not be able to make these decisions. If youre not able to do this, still try to deliver what you think is most important. Sometimes its easier to ask for forgiveness than permission."} +{"doc_id": 460205, "author": "MrFox", "text": "Communicate your concerns in the most concise and non-confrontational way possible up the management ladder. Summarize the risks, but do not impose your conclusion on them. Management must always have the choice of what to do, but it is your job to assess and communicate the situation. Use email, so as to leave a paper trail when things go south. Having done that, keep working on the project in good faith. Keep in mind, you may not know everything there is to know about the project, its backers, and financial decisions behind it. Management decisions that seem stupid to you might actually be based on smart reasoning that isnt visible to you."} +{"doc_id": 460206, "author": "Dan Pichelman", "text": "Keep a paper trail (e.g. diary, saved emails, etc). Only include facts and objective observations. Leave all conclusions up to whomever (if anyone) reads what youve written. As a developer, if youre not viewed as an obstacle to the project youre likely to come out fine from the finger-pointing that will no doubt happen. Your manager may not be so lucky, but thats not relevant here. Just on general principles, update your resume and make sure you occasionally meet with other devs outside your company. If youre not part of any local developer groups, go join 2 or 3. It takes years to build up a network of friends and acquaintances but in the long run its worth it. Be sure to look on it as a 2-way street - if you can help fill an opening in your company with someone capable, thats just as good as someone helping you find a job."} +{"doc_id": 460208, "author": "code4life", "text": "What impact will this soon-to-be failed project have on your career at the firm, and beyond? In my experience, merely being associated with successful projects is not an indicator of your own personal excellence. The qualities that you exhibit in the face of adversity and sometimes what looks to be certain failure, often gets noticed by the higher-ups, more so than you think. And Im talking about beyond your immediate manager. I personally have experienced seeing my immediate manager get fired for incompetence, and then found myself promoted into his position the very same day. Not pleasant, but it showed that people were watching me, and liked what I did. Oftentimes, the same chaos and disorganization that comes with a failing project, affords you the opportunity to shine. 
So look at the project this way: what opportunity does this failing project afford, for the light to shine on all of my strengths and best qualities? What lessons am I learning from this experience, that will make me a better professional and a better person? Essentially, the sum of experiences drawn from failures is what fuels true success. Note: Thomas Owens asked what specific things a person can do in a project like this. I have a few general suggestions, which I've used as personal guidelines in these situations. Will it help a distressed project to miraculously succeed? No - but I've found it has helped me to keep a proper perspective on things, and find personal success in spite of the bad situation. 1) Focus on personal excellence - strive to write ever better code, meet ever higher standards of quality and functionality. 2) Focus on personal metrics - how much of the code you write spawns subsequent bugs? Reduce that ratio to as low as you can. When asked to provide an estimate for a task assigned to you, are you generally accurate, or do you find that you've over/under-buffered the timeline too much? When actually assigned a task, do you provide a good level of feedback on the progression of work, including giving advance notice of delivery timeline problems well ahead of time? 3) Focus on team metrics - These are just some things off the top of my head: Are other team members lagging because of a dependency on a task you are working on? Are you good at delegating or dividing your task/subtasks to others in the team? Do you find it difficult to communicate with one or more members of the team? All areas that I work on to regularly improve in."} +{"doc_id": 460209, "author": "ozz", "text": "In a situation like this, as the lowest rung of the ladder, there is only so much you can do to help the project. Make sure your work is spotless; help identify the biggest problem areas; try to provide answers, not just problems. Look like you are trying to fix them. Aside from that, you really do have to look after number 1. Document everything; keep all emails and IM conversations. Try and find a way out of the project if at all possible."} +{"doc_id": 198068, "author": "gertvdijk", "text": "Use the -l option to ssh-add to list them by fingerprint. $ ssh-add -l 2048 72:...:eb /home/gert/.ssh/mykey (RSA) Or with -L to get the full key in OpenSSH format. $ ssh-add -L ssh-rsa AAAAB3NzaC1yc[...]B63SQ== /home/gert/.ssh/id_rsa The latter format is the same as you would put them in a ~/.ssh/authorized_keys file."} +{"doc_id": 558520, "author": "Dennis Jaheruddin", "text": "All explanations so far are a bit long, so here is a short one: Some people who can't remember their bank PIN keep a note in their wallet. If a thief or acquaintance got to look inside the wallet they'd have a problem, UNLESS the PIN is written in a way that they can't read it. Hashing is basically writing text in a way that nobody else can read it."} +{"doc_id": 67000, "author": "Fred Stutzman", "text": "The answer is to hold down command + shift + option whilst dragging the body of the terminal (not the tab) back to the terminal you wish to merge. Source: http://azaleasays.com/2014/03/05/iterm2-merge-a-pane-back-to-window-or-tab-bar/"} +{"doc_id": 460218, "author": "cdkMoose", "text": "Try to be proactive about finding a new way to achieve success for the project. Think about how you can propose some alternatives.
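For the ssh-add answer above: the fingerprint that ssh-add -l prints is just a hash of the base64 key blob shown by ssh-add -L (the colon-separated hex in the answer is the older MD5 format; modern OpenSSH prints SHA256). A small Python sketch; the key path is an assumption, so point it at any OpenSSH public key file you have:

import base64
import hashlib
import pathlib

# Assumed path; any OpenSSH *.pub file (or one line of `ssh-add -L` output)
# has the same whitespace-separated fields: type, base64 blob, comment.
line = pathlib.Path.home().joinpath(".ssh", "id_rsa.pub").read_text()
blob = base64.b64decode(line.split()[1])  # field 2 is the base64 key blob
digest = base64.b64encode(hashlib.sha256(blob).digest()).decode().rstrip("=")
print("SHA256:" + digest)  # same format as modern `ssh-add -l` output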
Right now your boss is probably getting beat up about the project being a failure; wouldn't (s)he appreciate someone coming in with solutions instead of problems? Maybe there is a way to split the features into staggered deliverables? There are often degrees of 'must have', so see if you can get those prioritized and group them into milestones. Better to have some product at the end of the timeline than nothing. Or consider splitting the team between people working on new functionality and others working on the stability, this way you can show some progress on both fronts. If these efforts succeed, you will have shown that you are a team member who can find a way to succeed; if not, you will still have demonstrated that you don't give up and will work to find a solution."} +{"doc_id": 460221, "author": "Reactgular", "text": "Failing projects can be toxic to the soul, and can cause depression, overwork and low self-esteem. It's all relative to perspective. I've worked on horrible projects while sitting across from another guy who had a smile on his face every single day. Oh how I wanted to slap that smile off his face. Some people aren't bothered by the current state of affairs on a project. They enjoy their contribution, their tasks, and are working within the domain of their interests. Others, though, have a strong negative reaction to the current state of things. It's all about our perceived expectations for our daily jobs. While you might be doing some of the work you enjoy, there are clearly elements in the current project you dislike. You need to identify what those problem elements are, and address them: deadline pressure, quality control, professionalism, guidance by management. There are many teams and companies that don't find the above aspects of development important. What I've found is they often think the following: Deadline pressure is perceived as a way to motivate people. Quality costs more and its returns are limited. Professionalism applies to other areas of the business. A manager is a timekeeper and not a person who contributes to development. These problems aren't yours. They're theirs, and you shouldn't waste any energy over their behavior. If you aren't one of the guys who smiles and enjoys his job even as it heads for a cliff, then you should think about finding a place of like-minded developers. You'll be much happier."} +{"doc_id": 558525, "author": "tylerl", "text": "The Short Answer The short answer is: So you don't get hit with a $5 million class-action lawsuit. That should be reason enough for most CEOs. Hashing passwords is a lot cheaper. But more importantly: simply hashing the passwords as you suggested in your question isn't sufficient. You'll still get the lawsuit. You need to do more. Why you need to do more takes a bit longer to explain. So let's take the long route for a moment so that you understand what you're explaining, and then we'll circle around for your 5-minute synopsis. Hashing is just the beginning But let's start with that. Say you store your users' passwords like this: # id:user:password 1:alice:pizza 2:bob:passw0rd 3:carol:baseball Now, let's say an attacker manages to get more access to your system than you'd like. He's only there for 35 seconds before you detect the issue and close the hole. But in those 35 seconds he managed to snag your password database. Yes, you made a security mistake, but you've fixed it now. You patched the hole, fixed the code, updated your firewall, whatever it may be. So everything is good now, right? Well, no, he has your password database.
That means that he can now impersonate every user on your system. Your systems security is destroyed. The only way to recover is to start over with NEW password database, forcing everyone to change their password without using their existing password as a valid form of identification. You have to contact them out-of-band through some other method (phone, email, or something) to verify their identity to re-create their passwords, and in the mean time, your whole operation is dead in the water. And what if you didnt see him steal the password database? In retrospect, its quite unlikely that you would actually see it happen. The way you probably find out is by noticing unusual activity on multiple users accounts. Perhaps for months its as if your system has no security at all and you cant figure out why. This could ruin your business. So we hash Instead of storing the password, we store a hash of the password. Your database now looks like this: # id:user:sha1 1:alice:1f6ccd2be75f1cc94a22a773eea8f8aeb5c68217 2:bob:7c6a61c68ef8b9b6b061b28c348bc1ed7921cb53 3:carol:a2c901c8c6dea98958c219f6f2d038c44dc5d362 Now the only thing you store is an opaque token that can be used to verify whether a password is correct, but cant be used to retrieve the correct password. Well, almost. Google those hashes, I dare you. So now weve progressed to 1970s technology. Congratulations. We can do better. So we salt I spent a long time answering the question as to why to salt hashes, including examples and demonstrations of how this works in the real world. I wont re-hash the hashing discussion here, so go ahead and read the original: Why are salted hashes more secure? Pretty fun, eh? OK, so now we know that we have to salt our hashes or we might as well have never hashed the passwords to begin with. Now were up to 1990s technology. We can still do better. So we iterate You noticed that bit at the bottom of the answer I linked above, right? The bit about bcrypt and PBKDF2? Yeah, it turns out thats really important. With the speed at which hardware can do hashing calculations today (thank you, bitcoin!), an attacker with off-the-shelf hardware can blow through your whole salted, hashed password file in a matter of hours, calculating billions or even trillions of hashes per second. Youve got to slow them down. The easiest way to slow them down is to just make them do more work. Instead of calculating one hash to check a password, you have to calculate 1000. Or 100,000. Or whatever number suits your fancy. You can also use scrypt (ess-crypt), which not only requires a lot of CPU power, but also a lot of RAM to do the calculation, making the dedicated hardware I linked above largely useless. This is the current state-of-the-art. Congratulations and welcome to todays technology. Are we done? So now what happens when the attacker grabs your password file. Well, now he can pound away at it offline instead of making online guess attempts against your service. Sadly, a fair chunk of your users (4% to 12%) will have used the password 123456 or password unless you actively prevent them from doing so, and the attacker will try guessing these first. If you want to keep users safe, dont let them use password as their password. Or any of the other top 500, for that matter. Theres software out there to make accurate password strength calculation easy (and free). But also, multi-factor authentication is never a bad call. Its easy for you to add to any project. So you might as well. 
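A minimal Python sketch of the salt-and-iterate scheme described above, using only the standard library. The iteration count is an illustrative choice, and hashlib.scrypt(password, salt=salt, n=2**14, r=8, p=1) is the memory-hard alternative the answer mentions:

import hashlib
import hmac
import os

password = b"correct horse battery staple"
salt = os.urandom(16)       # unique random salt per user, stored alongside
iterations = 600_000        # illustrative work factor; tune to your hardware

# The stored record is (salt, iterations, derived_key), never the password.
dk = hashlib.pbkdf2_hmac("sha256", password, salt, iterations)

def verify(candidate: bytes) -> bool:
    # Re-derive with the same salt and count, compare in constant time.
    cand = hashlib.pbkdf2_hmac("sha256", candidate, salt, iterations)
    return hmac.compare_digest(cand, dk)

print(verify(b"correct horse battery staple"))  # True
print(verify(b"123456"))                        # False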
Now, Your 5 Minutes of Glory Youre in front of your boss, he asks you why you need to use PBKDF2 or similar to hash your passwords. You mention the LinkedIn class-action suit and say, This is the minimum level of security legally expected in the industry. Anything less is literally negligence. This should take much less than 5 minutes, and if your boss isnt convinced, then he wasnt listening. But you could go on: The cost of implementing hashing technology is negligible, while the cost of not implementing it could be in the millions or higher. and In the event of a breach, a properly-hashed database allows you to position yourself as a well-run security-aware organization, while a database improperly hashed is a very public embarrassment that, as history has shown many times over, will not be ignored or overlooked in the slightest by the media. If you want to get technical, you can re-hash the above. But if youre talking to your boss, then you should know better than that. And analogies are much less effective than just showing the real-life effects that are perfectly visible with no sugar-coating necessary. You dont get people to wear safety gloves by recounting a good analogy. Instead you put some lunch meat in the beaker and when it explodes in green and blue flames you say, thats what will happen to your finger. Use the same principle here."} +{"doc_id": 558526, "author": "brokethebuildagain", "text": "Using analogies can be powerful, but in this case, I think it would be much easier to just explain in simple language what is going on. Something like this should be effective, but probably should include powerpoint slides with illustrations and large corporate fonts. As you know, we require people to use passwords so that we know who they are when they are using our product. We have to keep track of these passwords in order to let people log in. The problem is, we cant store the passwords exactly as they are entered, because attackers have found ways to be able to see them and steal them. We also cant just rewrite the passwords in a clever code and believe that we will be the only ones who know how to translate the code, because that still doesnt guarantee that determined attackers cant figure out the code, and it also doesnt protect against attacks from inside our organization, such as rogue ex-employees. To solve this, we must use a one-way password hash. A password hash is like using a code, except that it is impossible to decode.* This way, only the user knows his or her password. We only store the hash of the password and check it when the user logs in. This keeps our users safe and reduces our liability in the case of a data breach, which can have severe repercussions. [Include examples of companies that have been sued for insecure password storage] * [I know its not impossible, but probably the layman doesnt need that much detail.]"} +{"doc_id": 460228, "author": "Michael Cook", "text": "Sounds like most projects Ive been on. It probably wont end as badly as you think, however: 1) Do your job. Dont worry so much about the overall project as long as you complete your responsibilities. 2) CYA. If the project does fail and you suspect the manager will start blaming everyone but himself, make sure you have enough proof that you did everything required of you (see item 1). 3) Make a few polite suggestions for improvement. Dont sound the warning bells, dont be doom and gloom, just be polite and subtle. 
For example, if the team isn't writing effective unit tests (or any tests), write a few unit tests closer to what you would like to see and casually mention how doing so helped you solve a particular problem, or saved you time. Whatever it ends up being, if you want to effect change, focus on the positive steps that have concrete results. This project may never turn out to be a winner, but maybe the team can learn for the next one. Also: 4) Opportunistic refactoring is your friend."} +{"doc_id": 558548, "author": "aaaaaaaaaaaa", "text": "This thread is a bit short on analogies, so here goes: An unhashed password is like a transparent lock: anyone who gets a proper look at it can design the matching key."} +{"doc_id": 1495, "author": "Victor Jalencas", "text": "I use an inexpensive app, Cinch from Irradiated Software. Whenever I need to maximize a window, I just drag it from the title bar to the top of the screen, and Cinch takes care of resizing it for me. If I need to unmaximize, I just move it a little out of its position and it is restored to its previous state. I wish there were some way to do it (automatically) without extra applications, provided with the default system, but I know of none."} +{"doc_id": 132570, "author": "Daniel", "text": "The purpose of that feature is so that if there is an emergency you can dial an emergency number (911) without having to unlock the phone. It's not so that someone else can use it to call your wife/mother/ICE number. It's not a bad idea, since if you were hurt then someone else could inform those people, but as I understand it it's just for emergency numbers (although to be honest I don't know of any emergency numbers other than 911). Edit: I just realized that if you lost your phone you wouldn't want others to be able to make calls from the phone (another reason why it only allows emergency numbers). However if, as you suggested, it only allowed certain numbers... that would be cool."} +{"doc_id": 34267, "author": "nwinkler", "text": "The answer is to use Control+Option+Enter, which will create a line break in the cell. As an alternative, Control+Command+Enter can also be used to achieve the same. These combinations work both in Excel for Mac 2011 and the new Excel for Mac 2015. As pointed out by Shameer in this answer, Alt+Enter now seems to work as well in the new Excel for Mac 2015."} +{"doc_id": 1499, "author": "Am1rr3zA", "text": "Download and use RightZoom. It overrides your green plus (zoom) button and works like Windows maximize. You can add it to your login items in your user account to run every time you start OS X."} +{"doc_id": 558557, "author": "The Spooniest", "text": "Explain it in terms of lines of defense. Obviously, you're going to be doing everything you can to make sure that your code is secure. But the fact is, your server will run more than just the code you wrote, and you have no control over the code written by other people. Even if all of the other code on the machine is open-source, you would need to hire another 2-3 full-time developers to take responsibility for your own branches of everything. Since - let's not kid ourselves - this whole thing is supposed to be a cost-cutting measure, that is not a feasible way to go. Thus, even if you had absolute confidence in your own code, there would still be plenty of room for things to go wrong. You therefore need a way to ensure that even if an attacker gets into the machine, your passwords are still safe.
This is where hashing (in quotes because the proper algorithms to use in this day and age arent really hashing algorithms, per se, but its still a useful catch-all term) comes into play. In military terms, this is essentially how (and why) you set up multiple lines of defense. No general puts the entire military in the same spot, because you need to account for the possibility that something you didnt foresee allows your front lines to be defeated or bypassed. Hashing is your home guard: the thing that will protect your passwords when everything else has failed. You hope that this will never be needed, but the cost of not having it when you need it is simply too high: multi-million-dollar lawsuits are only the beginning."} +{"doc_id": 132571, "author": "Matthew Read", "text": "Yes, its limited to emergency numbers. It depends on your region and probably your carrier which ones specifically. Theres no easy way to change it. While its probably possible to hack that mode, I would advise against it. First because it defeats the purpose of having your phone locked (do you really want someone to steal your phone and call your wife?), second because it might screw things up when you really need it (and you cant exactly test whether 911 is working beforehand), and third because you should have ID with you anyways if youre concerned about this."} +{"doc_id": 1505, "author": "Paul D. Waite", "text": "I move the window to the top-left edge of the screen, and drag the window-sizing handle on the bottom-right corner of the window to the bottom-right edge of the screen. I don\u2019t have to do it very often, as windows seem to remember what size they were at."} +{"doc_id": 1507, "author": "Warren Pena", "text": "For many applications (Safari, Mail, Terminal, ...), simply command-clicking the + button will maximize the window."} +{"doc_id": 198117, "author": "AntoineG", "text": "Under OS X (didnt try on Linux), you can simply type Ctrl+T in the terminal running dd. It will print the same output as kill -INFO $PID, plus the CPU usage: load: 1.40 cmd: dd 34536 uninterruptible 3.49u 64.58s 5020305+0 records in 5020304+0 records out 2570395648 bytes transferred in 4284.349974 secs (599950 bytes/sec) I found out about it reading this thread, and trying to open a new tab in my terminal but mixing \u2318+T with Ctrl+T."} +{"doc_id": 1510, "author": "Anton", "text": "On Yosemite: Double click the window top bar (Like in MS Windows) or Option-click the green dot in the top left. On older OS X versions: Shift-click the green (+)"} +{"doc_id": 1511, "author": "Jonathan.", "text": "BetterTouchTool, if you have a macbook pro which has a multitouch pad (Im betting you do), it lets you assign thing like Windows style maximise to the very top middle of your touch pad. But you dont have to use it like that, as it has an AeroSnap type feature, where you can just drag a window to the top of the screen and it will truly maximise, also dragging it to the left/right will make the window take up the left/right side of the screen. I find the Mac version of maximise unsatisfactory for things like web page and xcode. And betterTouchTool has many good features."} +{"doc_id": 198124, "author": "jofel", "text": "A more general way is to use iotop that displays the current amount of disk reading / writing per program. 
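The Ctrl+T trick in the dd answer above works because the terminal sends the foreground process a SIGINFO signal on BSD-derived systems such as macOS (GNU dd on Linux listens for SIGUSR1 instead, via kill -USR1 <pid>). A hedged Python sketch of a long-running job reporting progress the same way; the loop body is a stand-in for real work:

import signal
import sys
import time

processed = 0

def report(signum, frame):
    # Mimics dd: print a status line whenever the signal arrives.
    print(f"{processed} records processed", file=sys.stderr)

# SIGINFO only exists on BSD-derived systems; fall back to SIGUSR1 on Linux.
signal.signal(getattr(signal, "SIGINFO", signal.SIGUSR1), report)

for _ in range(10_000):
    processed += 1
    time.sleep(0.001)  # stand-in for the real copying work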
EDIT: iotop -o shows only programs that are currently performing I/O operations (thanks to Jason C for this comment)."} +{"doc_id": 394733, "author": "ChrisF", "text": "Regular expressions - particularly non-trivial ones - are potentially difficult to code, understand and maintain. You only have to look at the number of questions on Stack Overflow tagged [regex] where the questioner has assumed that the answer to their problem is a regex and has subsequently got stuck. In a lot of cases the problem can (and perhaps should) be solved a different way. This means that, if you decide to use a regex, you now have two problems: The original problem you wanted to solve. The support of a regex. Basically, I think he means you should only use a regex if there's no other way of solving your problem. Another solution is probably going to be easier to code, maintain and support. It may be slower or less efficient, but if that's not critical, ease of maintenance and support should be the overriding concern."} +{"doc_id": 394734, "author": "Guffa", "text": "Regular expressions are very powerful, but they have one small and one big problem; they are hard to write, and near impossible to read. In the best case the use of the regular expression solves the problem, so then you only have the maintenance problem of the complicated code. If you don't get the regular expression just right, you have both the original problem and the problem with unreadable code that doesn't work. Sometimes regular expressions are referred to as write-only code. Faced with a regular expression that needs fixing, it's often faster to start from scratch than to try to understand the expression."} +{"doc_id": 67056, "author": "William T Froggard", "text": "Apple will not update Bash, because the latest version is licensed under GPLv3, which Apple cannot use. They have updated most of their other shells though. ZSH for example is mostly up to date. References: After a bit of research, this seems like the primary issue: https://www.gnu.org/licenses/gpl-faq.html#Tivoization When people distribute User Products that include software under GPLv3, section 6 requires that they provide you with information necessary to modify that software. User Products is a term specially defined in the license; examples of User Products include portable music players, digital video recorders, and home security systems. This would require that otherwise closed-source software have its GPL'd portions be made modifiable by the public, which would obviously be an issue for Apple."} +{"doc_id": 1525, "author": "BendiLow", "text": "An app I developed, Optimal Layout ($14), gives you keyboard commands to quickly switch and re-organize windows."} +{"doc_id": 591356, "author": "eckes", "text": "Just a BTW, the new NIST Digital Identity Guidelines (Draft) strongly recommend using a Pepper as well: https://pages.nist.gov/800-63-3/sp800-63b.html#sec5 5.1.1.2 Memorized Secrets Verifier: ... A keyed hash function (e.g., HMAC [FIPS198-1]), with the key stored separately from the hashed authenticators (e.g., in a hardware security module) SHOULD be used to further resist dictionary attacks against the stored hashed authenticators."} +{"doc_id": 67071, "author": "fancyhat", "text": "Just found http://volumemixer-app.com/ - it's very similar to the one found on Windows."} +{"doc_id": 558594, "author": "Briguy37", "text": "Let's say your database with passwords is leaked or stolen: If passwords are in plain-text, all your password are belong to us.
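One concrete mitigation for the write-only-code problem raised in the two regex answers above is Python's re.VERBOSE mode, where whitespace and comments inside the pattern are ignored, so the expression can be annotated. An illustrative sketch with the same (simplistic, example-only) dotted-quad pattern written both ways:

import re

# The opaque one-liner...
terse = re.compile(r"^(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})$")

# ...and the identical pattern in commented, reviewable form.
readable = re.compile(r"""
    ^ (\d{1,3}) \.      # first octet
      (\d{1,3}) \.      # second octet
      (\d{1,3}) \.      # third octet
      (\d{1,3}) $       # fourth octet
""", re.VERBOSE)

assert readable.match("192.168.0.1")
assert not readable.match("192.168.0.")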
If passwords are hashed, all passwords are still in a shared bank-vault that must be cracked. If passwords are hashed and salted, each password is in its own private bank-vault."} +{"doc_id": 460324, "author": "gnat", "text": "I participated in three projects that were clear failure. These were quite painful but looking back, two of three did not have negative consequences on my career, and even third one wasnt the end of the world. Here are some observations I recall. Developers at junior positions (code per spec, fix the bug, stuff like that) dont get affected much, unless they slack off due to lowered morale in the team. At positions like these, a sensible and even sometimes successful survival strategy could be just doing the best you could. For example, one of the failures I experienced has been overcome by plain, methodical fixing of more than hundred known bugs which (coupled with particularly smart approach of promoting this progress by tech lead) eventually led upper management to decision to recover the project and give it yet another chance with a new release, which in turn made a reasonable success. Programmers at more senior, influential positions would better be prepared to share the negative consequences of project failure. An architect, tech lead, senior developer are typically expected to make an impact big enough to be considered responsible for project success or failure. At senior position, one would better be prepared to gain from failure indirectly, by analyzing what went wrong and what could have been done better. These bits of knowledge, post-mortem lessons may be invaluable if learned right, the very successful career at senior positions may depend on how well these are learned, as explained in this brilliant answer at WP: Judgement comes not from success, but from failures. Most companies want to hire people that have had their failures paid for by previous companies... On a more practical note, one can consider next / update release approach as a possible way out of the failure. Coincidentally or not (I think not), but both of the failures that didnt damage my career went by very similar scenarios: release N was a total disaster, release N+1 was tolerable, releases N+2 and later were plain success. Walking in your shoes, I would most likely put some effort in preparing / promoting the idea of next release. Make (and communicate!) something like a tentative list of known issues that you would want to fix after planned release. Draft an informal, rough road map for next release(s). Think of how you could communicate these ideas to people around you, how you could influence management to consider this plan. If the project has someone with good marketing skills, try to involve them into amortizing the failure damage by wrapping up coming release into smoother terms, like early access, beta, customer preview, introductory release, stuff like that. Think of a backup plan in case if higher ups will appear deaf to this idea. Remember above story about fixing of more than hundred known bugs? there is a chance for things to change, really. Management may appear deaf to next-release ideas now, but there is a good chance for them to reconsider in the face of strong convincing evidence of project quality progress. It is quite likely that there will be rather long time between freezing code for planned release and management decision to drop it completely. 
That time is your chance: if you focus effort on fixing known issues and properly evangelizing the progress, this could make a difference (as it once made to me)."} +{"doc_id": 263719, "author": "Zaz", "text": "cp -a Where -a is short for --archive \u2014 basically it copies a directory exactly as it is; the files retain all their attributes, and symlinks are not dereferenced (-d). From man cp: -a, --archive same as -dR --preserve=all"} +{"doc_id": 1585, "author": "Am1rr3zA", "text": "Use RDC (Remote Desktop Connection Client for Mac 2). This free download runs natively on both Intel-based and PowerPC-based Macs. Use one Mac, unlimited Windows."} +{"doc_id": 1586, "author": "Philip Regan", "text": "If you are willing to spend some money, there is LogMeIn, which I have used for my home Macs with great success. On the free side, we used to use Chicken of the VNC but decided to go with Microsoft's official client for reasons unknown to me. Six of one, half a dozen of the other, if you ask me. Frankly, the VNC market is pretty banal."} +{"doc_id": 99895, "author": "Pavel Vlasov", "text": "Invoke \u22ee Chrome Menu \u2192 Window \u2192 Task Manager to see what exactly consumes CPU, because the helper is a black box from the OS's point of view. I personally found a mining extension that way. Also watch for ad content and ad blockers in an escalating war, with endless refresh and block cycles."} +{"doc_id": 263757, "author": "sjas", "text": "Use terminator. It should be available via your favourite package manager, if you use a mainstream distribution. It is the only terminal multiplexer where copy-pasting works properly from within panes in your window. Update: tilix is a terminator alternative which is almost completely on par with terminator (and where copy-pasting works properly, too). After evaluating it I still stick with terminator, however, due to nicer tab handling and easier configuration (from my POV)."} +{"doc_id": 329301, "author": "Bhagyesh Dudhediya", "text": "There are multiple ways to do this. grep 'foo\\|bar' *.txt egrep 'foo|bar' *.txt find . -maxdepth 1 -type f -name '*.txt' | xargs grep 'foo\\|bar' find . -maxdepth 1 -type f -name '*.txt' | xargs egrep 'foo|bar' The 3rd and 4th options will grep only in the files and avoid directories having .txt in their names. So, as per your use-case, you can use any of the options mentioned above. Thanks!!"} +{"doc_id": 460377, "author": "Jim G.", "text": "Work hard; but not at the expense of your family or your health. Keep a record of all critical design decisions; especially as they pertain to your work. Keep networking, and keep your options open if the situation becomes too difficult or you become a victim of a mass layoff. Try not to think of your project as a failed project. Everyone likes people who stay positive and fight hard in the face of adversity. So for as long as possible, try to be that person. A positive outlook, grit, and determination are always good for the workplace. If you're anticipating a failed project, then you're anticipating a post-mortem meeting. At the post-mortem meeting, everyone will be held to account. Be prepared to defend all of your code. [Note: As a general rule, you should always write clean code so that it's easy to defend it later.] If you have an email or design document that inspired your decisions, then that's even better.
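For comparison with the grep/egrep recipes above, the same multi-pattern search is easy to express in plain Python; a sketch, where the patterns and the *.txt file set are just the example values from the answer:

import glob
import re

# Equivalent of `egrep 'foo|bar' *.txt`: one alternation, applied per line.
pattern = re.compile(r"foo|bar")
for path in glob.glob("*.txt"):
    with open(path, errors="replace") as fh:
        for lineno, line in enumerate(fh, 1):
            if pattern.search(line):
                print(f"{path}:{lineno}:{line.rstrip()}")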
At that post-mortem meeting, try to stay positive; and only present your email and design document evidence if your judgement, effort, or workmanship is called into question."} +{"doc_id": 165469, "author": "Reddy Lutonadio", "text": "Yalp Store allows the download of apps directly from the Google Play Store. By default Yalp Store connects to Google services using a built-in account, so you do not have to own a Google account to use it. The only reason to use a live Google account is to access the paid apps you own or leave reviews. It has an option to show the number of trackers found in an app and view the app's report via Exodus Privacy."} +{"doc_id": 67174, "author": "Muthukumar Anbalagan", "text": "If you are using Sublime Text, then you can use the MARKDOWN PREVIEW package from the package manager. It has multiple styles like GITHUB, HTML, etc. https://www.sublimetext.com https://facelessuser.github.io/MarkdownPreview/"} +{"doc_id": 394870, "author": "Ryszard Szopa", "text": "If you start following Test Driven Development practices, they will sort of guide you through the process and knowing what to test will come naturally. Some places to start: Tests come first Never, ever write code before writing the tests. See Red-Green-Refactor-Repeat for an explanation. Write regression tests Whenever you encounter a bug, write a testcase, and make sure it fails. Unless you can reproduce a bug through a failing testcase, you haven't really found it. Red-Green-Refactor-Repeat Red: Start by writing a most basic test for the behavior that you are trying to implement. Think of this step as writing some example code that uses the class or function that you are working on. Make sure it compiles/has no syntax errors and that it fails. This should be obvious: you haven't written any code, so it must fail, right? The important thing to learn here is that unless you see the test fail at least once, you can never be sure that, if it passes, it does so because of something that you've done and not for some bogus reason. Green: Write the most simple and stupid code that actually makes the test pass. Don't try to be smart. Even if you see an obvious edge case that the test doesn't take into account, don't write code to handle it (but don't forget about the edge case: you'll need it later). The idea is that every piece of code you write, every if, every try: ... except: ... should be justified by a test case. The code doesn't have to be elegant, fast or optimized. You just want the test to pass. Refactor: Clean up your code, get the method names right. See if the test is still passing. Optimize. Run the test again. Repeat: You remember the edge case that the test didn't cover, right? So, now it's its big moment. Write a testcase that covers that situation, watch it fail, write some code, see it pass, refactor. Test your code You are working on some specific piece of code, and this is exactly what you want to test. This means that you should not be testing library functions, the standard library or your compiler. Also, try to avoid testing the world. This includes: calling external web APIs, some database-intensive stuff, etc. Whenever you can, try to mock it up (make an object that follows the same interface, but returns static, predefined data)."} +{"doc_id": 591480, "author": "Ed Daniel", "text": "Who is at risk? Anyone running operating systems that are listed in the patch announcement here: https://technet.microsoft.com/en-us/library/security/ms17-010.aspx How?
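A tiny, self-contained Python illustration of the red-green-refactor loop described in the TDD answer above; slug() and both tests are hypothetical examples, not taken from the original answer:

import unittest

def slug(title):
    # Green: the simplest, stupidest code that passes the tests below;
    # refactor only once more tests justify more behaviour.
    return title.strip().lower().replace(" ", "-")

class SlugTests(unittest.TestCase):
    def test_spaces_become_dashes(self):
        # Red: this test was written first and failed until slug() existed.
        self.assertEqual(slug("Hello World"), "hello-world")

    def test_regression_leading_space(self):
        # Regression test: added the moment the leading-space bug was found.
        self.assertEqual(slug(" Hello"), "hello")

if __name__ == "__main__":
    unittest.main()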
Malware can be delivered in many ways, once one endpoint is compromised the worm aspect of this malware exploits ms17-010. So, it could be clicking on a link, opening up an archive that has been sent via email etc. etc. https://www.microsoft.com/en-us/security/portal/mmpc/help/infection.aspx It seems to be? Are you kidding me ;-) Watch it spread: https://intel.malwaretech.com/botnet/wcrypt/?t=1m&bid=all Indicators of compromise: https://otx.alienvault.com/pulse/5915d8374da2585a08eaf2f6/ Scan for vulnerable endpoints (nmap): https://github.com/cldrn/nmap-nse-scripts/blob/master/scripts/smb-vuln-ms17-010.nse"} +{"doc_id": 591481, "author": "Nik Nik", "text": "WannaCry attacks are initiated using an SMBv1 remote code execution vulnerability in Microsoft Windows OS. The EternalBlue exploit has been patched by Microsoft on March 14 and made publicly available through the Shadowbrokers dump on April 14th, 2017. However, many companies and public organizations have not yet installed the patch to their systems. The Microsoft patches for legacy versions of Windows were released last week after the attack. How to prevent WannaCry infection? Make sure that all hosts have enabled endpoint anti-malware solutions. Install the official Windows patch (MS17-010) https://technet.microsoft.com/en-us/library/security/ms17-010.aspx, which closes the SMB Server vulnerability used in this ransomware attack. Scan all systems. After detecting the malware attack as MEM:Trojan.Win64.EquationDrug.gen, reboot the system. Make sure MS17-010 patches are installed. Backup all important data to an external hard drive or cloud storage service. More information here: https://malwareless.com/wannacry-ransomware-massively-attacks-computer-systems-world/"} +{"doc_id": 165505, "author": "Bo Lawson", "text": "/storage/emulated/0/Download is the actual path to the files. /sdcard/Download is a symlink to the actual path of /storage/emulated/0/Download However, the actual files are located in the filesystem in /data/media, which is then mounted to /storage/emulated/0 (and often other mountpoints as well) A Symlink In computing, a symbolic link is a term for any file that contains a reference to another file or directory in the form of an absolute or relative path and that affects pathname resolution. Symbolic links were already present by 1978 in minicomputer operating systems from DEC and Data Generals RDOS."} +{"doc_id": 591492, "author": "AndyO", "text": "Cisco has posted an article on this that goes into more detail than any of the others Ive seen. Their basic steps for prevention are as follows: Ensure all Windows-based systems are fully patched. At a very minimum, ensure Microsoft bulletin MS17-010 has been applied. In accordance with known best practices, any organization who has SMB publically accessible via the internet (ports 139, 445) should immediately block inbound traffic. And at least based on that Microsoft bulletin, it would seem that this is a SMBv1 vulnerability, not SMBv2."} +{"doc_id": 1668, "author": "Am1rr3zA", "text": "If you use Firefox 3.5 or later: Go to Preferences \u2318, and and choose the Application pane. Find mailto in the content type list, and choose what you want. If you want Gmail to open, choose it in the popup menu. Or you can use More Internet, a System Preferences pane that lets you choose which applications are set as helpers for Internet protocols."} +{"doc_id": 263812, "author": "Cory Klein", "text": "I find the most convenient method of editing multiple files is by using tabs. 
You can open multiple files in separate tabs via the command line like so: vim -p file1.txt file2.txt Or if you already have vim open, you can open a new file in a new tab like so: :tabe file2.txt Once you have the tabs open, use gt to view the next tab and gT to view the previous tab. You can also jump to the first tab with 1gt, the second tab with 2gt, etc. You can close tabs using :tabc Finally you can move the current tab to the nth location with :ntabm where n is any number greater than or equal to 0."} +{"doc_id": 1672, "author": "Guillermo Esteves", "text": "With Webmailer, you can set up any webmail client as the default email client in OS X."} +{"doc_id": 591501, "author": "dark_st3alth", "text": "The ransomware is using a known, publicly disclosed exploit in SMBv1 (Server Message Block Version 1). It is an application level protocol used for sharing files and printers in a networked environment. The SMBv1 protocol is commonly found in networked Windows environments, and includes operating systems such as Windows XP, Windows 7, 8, 8.1, and 10. Windows Vista and onward allow for the use of SMBv1, even though they support the improved SMBv2 and v3 protocols. Those environments who do not use Microsofts implementation, are unlikely to be affected by the exploit and related vulnerabilities. In addition, those environments that do not support SMBv1 are also not affected. You can disable SMBv1 support, as per Microsofts directions: https://support.microsoft.com/kb/2696547 Those running Windows 8.1 or Windows Server 2012 R2 and later can disable the support by removing the Windows Feature for SMB1.0/CIFS File Sharing Support. There are six major vulnerabilities in Microsofts implementation of SMBv1. The first five (and more critical) are ones that allow for remote arbitrary code execution. The last one allows for data disclosure. The ransomware leverages the first five vulnerabilities and exploits them. Measures users/enterprises can take to mitigate this ransomware and others includes: Make sure systems are patched, the vulnerabilities were patched in March of 2017. Keep a recent backup of your system or critical user/business data. Use and maintain an anti-virus solution Use a backup scheme such as GFS (Grandfather, father, son). Remove the use or support of SMBv1 (see above). Segregate the network such that damage impact is lessened. Use a diverse set of systems and operating systems if possible. Web Links: https://technet.microsoft.com/en-us/library/security/ms17-010.aspx http://msdn.microsoft.com/en-us/library/aa365233(VS.85).aspx http://www.eweek.com/security/wannacry-ransomware-attack-hits-victims-with-microsoft-smb-exploit"} +{"doc_id": 231053, "author": "naugtur", "text": "Theres a new tool called progress that can find any descriptor related to a running command and show progress and speed: available here progress -w outputs the stats for all running cp,mv etc. operations"} +{"doc_id": 329361, "author": "kenorb", "text": "git grep Here is the syntax using git grep combining multiple patterns using Boolean expressions: git grep --no-index -e pattern1 --and -e pattern2 --and -e pattern3 The above command will print lines matching all the patterns at once. --no-index Search files in the current directory that is not managed by Git. Check man git-grep for help. See also: How to use grep to match string1 AND string2? Check if all of multiple strings or regexes exist in a file. For OR operation, see: How do I grep for multiple patterns with pattern having a pipe character? 
Grep: how to add an OR condition?"} +{"doc_id": 67218, "author": "Markus L", "text": "The easiest solution is to create a script similar to the one above with just one line like this: tell application Messages to clear unread messages"} +{"doc_id": 231059, "author": "unxnut", "text": "It is easy enough to do using a regular expression: n=2 echo lkj | sed \"s/\\(.*\\).\\{$n\\}/\\1/\""} +{"doc_id": 1685, "author": "Jonik", "text": "Update (1/2014) Turns out Google Notifier for Mac is no longer supported. If you're using Chrome, this is now very simple: open settings, search for mailto, and set Gmail as the handler for mailto. For Firefox, see this answer. (The remainder of this answer is now obsolete.) While options like Webmailer also do the job, I'll mention yet another way for the sake of completeness: Install Google Notifier for Mac. It's an official Gmail utility made by Google. Open Mail.app, go to Preferences -> General, and set Default email reader to Google Notifier.app. (Yep, you need to configure this in Mail even when Mail is what you don't want to use...) Besides directing clicks on mailto: links to Gmail, the Google Notifier comes with some additional features: It adds an icon in the menu bar and notifies you (if configured to do so) about new mail in your Gmail box. Through the icon you can also access your inbox, unread messages, and the Compose mail screen quickly. Works great for me; I can generally vouch for this useful little app."} +{"doc_id": 165525, "author": "Irfan Latif", "text": "/storage/emulated/0/ is actually /data/media/0/ exposed through an emulated / virtual filesystem, not the actual one. This is with reference to my previous answer here, but with more relevant details. ANDROID STORAGE: On Android 5: /sdcard >S> /storage/emulated/legacy >S> /mnt/shell/emulated/0 /mnt/shell/emulated >E> /data/media On Android 6+: # for (Java) Android apps (running inside zygote virtual machine) # /storage to VIEW bind mount is inside a separate mount namespace for every app /sdcard >S> /storage/self/primary /storage/self >B> /mnt/user/USER-ID /mnt/user/USER-ID/primary >S> /storage/emulated/USER-ID /storage/emulated >B> /mnt/runtime/VIEW/emulated /mnt/runtime/VIEW/emulated >E> /data/media # for services/daemons/processes in root/global namespace (VIEW = default) /sdcard >S> /storage/self/primary /storage >B> /mnt/runtime/default /mnt/runtime/default/self/primary >S> /mnt/user/USER-ID/primary /mnt/user/USER-ID/primary >S> /storage/emulated/USER-ID /storage/emulated >B> /mnt/runtime/default/emulated /mnt/runtime/default/emulated >E> /data/media * >S> for symlink, >E> for emulated and >B> for bind mount * USER-ID of the current user in case of Multiple Users or Work Profile, normally 0 i.e. that of the device owner * VIEW is one of read (for apps with permission.READ_EXTERNAL_STORAGE) or write (permission.WRITE_EXTERNAL_STORAGE) or default (for processes running in root/global mount namespace i.e. outside zygote) * There were minor differences on previous Android versions but the concept of emulation has been the same ever since it was implemented. * For a little bit more detail on Android's mount namespace implementation, see this answer. In short, /sdcard and /storage/emulated/0 - which represent a FAT/vFAT/FAT32 filesystem - point towards /data/media/0 (or /mnt/expand/[UUID]/media/0 in case of Adoptable Storage) through FUSE or sdcardfs emulation.
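The symlink chains listed above can be inspected programmatically; a hedged Python sketch (the paths assume an Android shell such as Termux, so substitute any local symlink on another system):

import os

# Inspect one hop of the chain at a time...
print(os.readlink("/sdcard"))                 # e.g. /storage/self/primary
# ...or collapse every symlink in one call:
print(os.path.realpath("/sdcard/Download"))   # e.g. /storage/emulated/0/Download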
Being not Android specific but generally Linux related, symlink and bind mount (see Creating a bind mount) are out of the scope of this question, as the question is about emulation part mainly. EMULATION: Why the emulation is here? Emulated filesystem is an abstraction layer on actual filesystem (ext4 or f2fs) that serves basically two purposes: Retain USB connectivity of Android devices to PCs (implemented through MTP now a days) Restrict unauthorized access of apps/processes to users private media and other apps data on SD card. Read Androids Storage Journey for details, the summary is: Early Android devices were short on internal storage and relied on (physically) external SD cards that traditionally use FAT family of filesystem to ensure compatibility with most of the PCs (refer to Microsofts dominance on PC world). When the internal storage grew in size, same filesystem was shifted to internal (still called external) SD card. But the FAT/vFAT implementation had two major issues which were addressed by Google gradually: Android devices were connected to PCs directly (USB Mass Storage) just as we connect a USB drive these days. UMS exposes the device at block level and disconnects the SD card from Android framework (un-mounts), thus making whole data unavailable to apps and possibly breaking many functionalities. FAT (being Windows favorite in development days) was never designed to enforce UNIX permissions (mode, uid, gid and likewise symlinks, and ioctls like FS_IOC_FIEMAP). So, all data on SD card was available to all apps (since every Android app is a UNIX/Linux user and has a uid) with no restrictions, hence raising serious privacy and security concerns. Both of these issues were addressed through emulation: Actual SD card storage was moved to /data partition (or independent /sdcard partition on some devices previously) which holds ext4 filesystem (gradually being replaced by f2fs), fully implementing UNIX permissions. This design made using UMS impossible because whole /data partition could not be exposed to PC for 2 more reasons: (1) it contains a lot of settings and apps data which is to be protected from other apps as well as human users. (2) Linux filesystems are not supported by Windows. So UMS was replaced with Media Transfer Protocol which is a client-server type extension to PTP - an already established protocol. MTP doesnt expose block device but works through software stack. MTP host runs on Android as an app (android.process.media) fully sandboxed in Android framework, not capable of doing any escalated tasks. Now the apps (and MTP, which is also an app) interact with emulated storage instead of /data/media, achieving both purposes at the same time i.e. enforcing permission checks underneath and looking like FAT filesystem on upper surface. Google is now implementing emulation through sdcardfs to overcome shortcomings of FUSE; one major being the input/output overhead i.e. to improve read/write speeds. EXTERNAL STORAGE PERMISSIONS: Concept of Public and Private files on external storage can be demonstrated using an example: Install Termux app. Create directories /sdcard/Android/data/com.termux/test_dir and /sdcard/test_dir. Create files /sdcard/Android/data/com.termux/test_file and /sdcard/test_file. Execute following commands: * You should have WhatsApp installed or select some other apps private folder. Now Force Stop the Termux app and grant Storage permission. Execute the commands again: See the difference in permissions of same files and directories. 
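To reproduce the permission comparison from the Termux demo above in Python rather than with stat(1), something like the following sketch works; the paths assume that demo and must exist on the device:

import os
import stat

for path in ("/sdcard/test_file", "/storage/emulated/0/test_file"):
    st = os.stat(path)
    # S_IMODE strips the file-type bits, leaving the rwx permission octal
    # that differs between the emulated views described above.
    print(path, oct(stat.S_IMODE(st.st_mode)), "uid:", st.st_uid, "gid:", st.st_gid)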
This seems not to be simply possible without emulation on a native Linux filesystem when there are hundreds of apps (users) to be dealt with simultaneously. This is the filesystem emulation that lets the same file to be exposed with three different sets of permissions at same time independent of its original permissions on actual filesystem: # touch /data/media/0/test_file # stat -c %a %u %g %n /data/media/0/test_file 644 1023 1023 /data/media/0/test_file # stat -c %a %u %g %n /mnt/runtime/*/emulated/0/test_file 660 0 1015 /mnt/runtime/default/emulated/0/test_file 640 0 9997 /mnt/runtime/read/emulated/0/test_file 660 0 9997 /mnt/runtime/write/emulated/0/test_file Also see What is the \u201cu#_everybody UID? Related: Why is the storage/emulated/ directory inaccessible? How can I make a symlink (or equivalent) inside /storage/emulated/0?"} +{"doc_id": 231061, "author": "steeldriver", "text": "In a POSIX shell, the syntax ${t:-2} means something different - it expands to the value of t if t is set and non null, and otherwise to the value 2. To trim a single character by parameter expansion, the syntax you probably want is ${t%?} Note that in ksh93, bash or zsh, ${t:(-2)} or ${t: -2} (note the space) are legal as a substring expansion but are probably not what you want, since they return the substring starting at a position 2 characters in from the end (i.e. it removes the first character i of the string ijk). See the Shell Parameter Expansion section of the Bash Reference Manual for more info: Bash Reference Manual \u2013 Shell Parameter Expansion"} +{"doc_id": 1691, "author": "Benedict Cohen", "text": "I use Spark and have \u2318+\u2325+\u00a7 (I have a UK keyboard layout; \u00a7 is just below esc) to launch the terminal. I prefer it to using Services/AppleScripts because its faster. Also it doesnt add any visible UI elements like other solutions (Im very anal about keeping my workspace as streamlined as possible). I think development for Spark has stopped but it works perfectly on Snow Leopard."} +{"doc_id": 231069, "author": "cuonglm", "text": "With bash 4.2 and above, you can do: ${var::-1} Example: $ a=123 $ echo ${a::-1} 12 Notice that for older bash ( for example, bash 3.2.5 on OS X), you should leave spaces between and after colons: ${var: : -1}"} +{"doc_id": 1696, "author": "Chris Quenelle", "text": "I use a program called CoRD. Its worked well for me without fussing with any configuration files or properties. Its been a while since I tried any alternatives, so Ill give RDC another try. I dont remember why I rejected it before."} +{"doc_id": 263842, "author": null, "text": "Im surprised to not see readarray mentioned. It makes this very easy when used in combination with the <<< operator: $ touch oneword two words $ readarray -t files <<<$(ls) $ for file in ${files[@]}; do echo |$file|; done |oneword| |two words| Using the <<<$expansion construct also allows you to split variables containing newlines into arrays, like: $ string=$(dmesg) $ readarray -t lines <<<$string $ echo ${lines[0]} [ 0.000000] Initializing cgroup subsys cpuset readarray has been in Bash for years now, so this should probably be the canonical way to do this in Bash."} +{"doc_id": 329385, "author": "kenorb", "text": "Pipe (|) is a special shell character, so it either needs to be escaped (\\|) or quoted as per manual (man bash): Quoting is used to remove the special meaning of certain characters or words to the shell. 
It can be used to disable special treatment for special characters, to prevent reserved words from being recognized as such, and to prevent parameter expansion. Enclosing characters in double quotes preserves the literal value of all characters within the quotes. A non-quoted backslash (\\) is the escape character. See: Which characters need to be escaped in Bash? Here are a few examples (using tools not mentioned yet): Using ripgrep: rg 'foo|bar' *.txt rg -e foo -e bar *.txt Using git grep: git grep --no-index -e foo --or -e bar Note: It also supports Boolean expressions such as --and, --or and --not. For an AND operation per line, see: How to run grep with multiple AND patterns? For an AND operation per file, see: How to check all of multiple strings or regexes exist in a file?"} +{"doc_id": 1711, "author": "hujunfeng", "text": "I love Apptivate; it is like Spark. Apptivate lets you assign system-wide shortcuts to any application, document, or script file. It just does this one thing, and does it pretty well. It's very small, so it doesn't use much system resource. One great feature of Apptivate is that if it detects that the application, say Terminal.app, is already running, Apptivate will hide it instead of launching a new instance of the application."} +{"doc_id": 132785, "author": "Lie Ryan", "text": "Do you happen to have enabled Settings > Application > Development > Stay Awake (screen will never sleep when charging)?"} +{"doc_id": 591539, "author": "symcbean", "text": "While installing vendor patches is always a good idea, it's also worth noting that the malware carries out a DNS check on activation. I've seen one reported domain: www.iuqerfsodp9ifjaposdfjhgosurijfaewrwergwea.com But it's likely that there may be more. Hence it should be possible to monitor your network for new infections using something like this (on a Linux/Unix box), which tests for a very long string as a domain component in a DNS query: tcpdump -K dst port 53 | awk '$8 ~ /[^\\.]{20,}/ { print $0; }' (not tested: YMMV)"} +{"doc_id": 591541, "author": "Soufiane Tahiri", "text": "It's also important to know that there are new variants of WannaCry (dubbed WannaCry v2) which are believed not to be from the same authors.
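(A hedged aside before the details: on a single Windows machine you can look for the registry indicators enumerated below with the built-in reg.exe; the paths are the ones this answer lists, and a hit is only an indicator, not proof.)
reg query \"HKLM\\SOFTWARE\\WannaCryptor\" /v wd
reg query \"HKLM\\SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Run\" /v \"Microsoft Updates Task Scheduler\"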
How this malware compromises systems: First it creates and sets the following registry entries: HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Run\\Microsoft Updates Task Scheduler = [PATH_TO_RANSOMEWARE][TRANSOMEWARE_EXE_NAME] /r HKEY_LOCAL_MACHINE\\SOFTWARE\\WannaCryptor\\wd = [PATH_TO_RANSOMEWARE] HKEY_CURRENT_USER\\Control Panel\\Desktop\\Wallpaper = %UserProfile%\\Desktop!WannaCryptor!.bmp WannaCry then creates the following mutexes: Global\\WINDOWS_TASKOSHT_MUTEX0 LGlobal\\WINDOWS_TASKCST_MUTEX After this, it terminates the following processes using taskkill /f /im: sqlwriter.exe sqlserver.exe Microsoft.Exchange.* MSExchange* WannaCry starts searching, encrypting and appending .WCRY to the end of the file names of the following file-formats: .123 .3dm .3ds .3g2 .3gp .602 .7z .ARC .PAQ .accdb .aes .ai .asc .asf .asm .asp .avi .backup .bak .bat .bmp .brd .bz2 .cgm .class .cmd .cpp .crt .cs .csr .csv .db .dbf .dch .der .dif .dip .djvu .doc .docb .docm .docx .dot .dotm .dotx .dwg .edb .eml .fla .flv .frm .gif .gpg .gz .hwp .ibd .iso .jar .java .jpeg .jpg .js .jsp .key .lay .lay6 .ldf .m3u .m4u .max .mdb .mdf .mid .mkv .mml .mov .mp3 .mp4 .mpeg .mpg .msg .myd .myi .nef .odb .odg .odp .ods .odt .onetoc2 .ost .otg .otp .ots .ott .p12 .pas .pdf .pem .pfx .php .pl .png .pot .potm .potx .ppam .pps .ppsm .ppsx .ppt .pptm .pptx .ps1 .psd .pst .rar .raw .rb .rtf .sch .sh .sldm .sldx .slk .sln .snt .sql .sqlite3 .sqlitedb .stc .std .sti .stw .suo .svg .swf .sxc .sxd .sxi .sxm .sxw .tar .tbk .tgz .tif .tiff .txt .uop .uot .vb .vbs .vcd .vdi .vmdk .vmx .vob .vsd .vsdx .wav .wb2 .wk1 .wks .wma .wmv .xlc .xlm .xls .xlsb .xlsm .xlsx .xlt .xltm .xltx .xlw .zip For prevention Nik gave you all you need to know but Ill add that you should try to block inbound connections on port 445/TCP. Make sure not to block the following sinkhole domain, as this is the kill switch found in the Wannacry v1 binary: hxxp://www[.]iuqerfsodp9ifjaposdfjhgosurijfaewrwergwea[.]com Hope it helps."} +{"doc_id": 591552, "author": "IAmBarry", "text": "It seems to be a both a standard phishing/ransomware attack but its also spreading like a worm once it gets into a target network. Windows servers are typically behind firewalls that dont pass SMB. Once the first machine on a protected network is infected the worm propagates the attack usning the SMB exploit noted above. Id like to get confirmation on the phishing side of the attack. Microsoft (as of two days ago) still didnt have info on the initial compromise : We haven\u2019t found evidence of the exact initial entry vector used by this threat, but there are two scenarios that we believe are highly possible explanations for the spread of this ransomware: Arrival through social engineering emails designed to trick users to run the malware and activate the worm-spreading functionality with the SMB exploit Infection through SMB exploit when an unpatched computer is addressable from other infected machines (https://blogs.technet.microsoft.com/mmpc/2017/05/12/wannacrypt-ransomware-worm-targets-out-of-date-systems/) [Edit] Just saw that Forbes doesnt think Phishing is a major component of this attack. see https://www.forbes.com/sites/thomasbrewster/2017/05/12/nsa-exploit-used-by-wannacry-ransomware-in-global-explosion/#37038021e599 : ...its unlikely phishing emails were the primary infection method, given few have shared emails laced with the malware. Ciscos Talos division does not believe any phishing emails were used... 
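(An illustrative aside, not part of the quoted reporting: you can check an address range for exposed SMB yourself with nmap; the range below is a placeholder.)
nmap -p 445 --open 203.0.113.0/24   # lists hosts answering on TCP 445 (SMB)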
So that would leave unprotected servers with SMB ports exposed to the open internet as the primary infection vector. That might explain some of the high-profile targets reported who have widely spread networks (FedEx, NHS, etc). It would only take one exposed computer that also connected to a wider network to bootstrap an infection."} +{"doc_id": 263877, "author": "user138692", "text": "For running applications, the file /proc/1234/maps contains all the actually loaded dynamically linked libraries, where 1234 is the pid of the running executable. Linux follows LD_LIBRARY_PATH and other variables, as pointed out in the answer by Gilles."} +{"doc_id": 329417, "author": "AbstProcDo", "text": "shift treats the command-line arguments as a FIFO queue; it pops the leftmost element every time it's invoked. array = [a, b, c] shift is equivalent to array.popleft, leaving [b, c] $1, $2, $3 can be interpreted as indexes into the array, and $# is the length of the array."} +{"doc_id": 591583, "author": "dr_", "text": "In addition to the preceding answers, which mention only Windows, and since there's a dup-closed question Does WannaCry infect Linux? pointing to this one, I'd like to add that Linux machines can get infected too if they're running Wine: https://twitter.com/hackerfantastic/status/863359375787925505"} +{"doc_id": 624359, "author": "Ron Maupin", "text": "iBGP requires a full mesh or the use of mitigation like confederations or route reflectors, BGP doesn't converge with anything like the speed of OSPF, etc. Each OSPF router would have a full understanding of all the routes that are in the area in which it resides without needing a full mesh, and it converges very, very quickly. Using an IGP is recommended with iBGP. Without the IGP, iBGP must neighbor on external-facing interfaces; with an IGP, iBGP can neighbor on loopback interfaces, which never go down and can have multiple paths to reach them. I have seen iBGP-only for local routing, but it is more difficult and fragile."} +{"doc_id": 67304, "author": "Justin", "text": "For many it is because of a WiFi/Bluetooth conflict. Go to the \uf8ff Apple menu and choose System Preferences Go to the Network control panel Select \u201cBluetooth PAN\u201d from the list of network interfaces on the left side menu Hit the Delete key or the [-] minus button to remove the Bluetooth PAN interface Note: Bluetooth PAN is for connecting to your iPhone for things like Instant Hotspot and can easily be re-added using the [+] on the same screen if you need it later."} +{"doc_id": 394992, "author": "Frank Shearar", "text": "A circular reference is twice the coupling of a non-circular reference. If Foo knows about Bar, and Bar knows about Foo, you have two things that need changing (when the requirement comes that Foos and Bars must no longer know about each other). If Foo knows about Bar, but a Bar doesn't know about Foo, you can change Foo without touching Bar. Cyclical references can also cause bootstrapping problems, at least in environments that last for a long time (deployed services, image-based development environments), where Foo depends on Bar working in order to load, but Bar also depends on Foo working in order to load."} +{"doc_id": 132853, "author": "newuser", "text": "The default location where Dropbox will place downloaded files is /mnt/sdcard/download on my OG Droid. I can't imagine it changes much between phone models."} +{"doc_id": 591606, "author": "WinEunuuchs2Unix", "text": "NHS was doomed to be the first one hit There are many great answers here, but this answer is enlightening given recent events.
On January 18th, 2017 US-Cert urged admins to firewall off SMBv1 but comments on this story says the only reason Windows XP support is still around is because the NHS (UKs National Health Services which got shutdown on Friday May 12th) pays M$ tons of cash to keep it alive. One link for all off support Windows vulnerable versions If you have an older Windows Vista backup laptop like myself, you might be interested in KB4012598 for Windows 8, XP, Vista, Server 2008 and Server 2003 which are equivalents to much talked about MS17-010. These are manual patches for EOL (End of Life) Windows versions off of support and automatic updates. Microsoft took the extraordinary step of releasing these patches over the last 48 hours. Linux users can be effected too If there are Linux users reading this answer Id like to point out vulnerabilities discussed in Ask Ubuntu on this Question I posted. Technical details not listed in other answers This article discusses blocking specific ports and disabling SMBv1 and SMBv2 in favour of SMBv3. Part of the article states the FBI says you shouldnt pay the criminals to get your data back but in all honesty I would pay 300 bucks to get my life back. Spooky coincidences The Shadow Brokers have made 31 grand so far according to one article today. Interesting fact the name first appeared (AFAIK) as a fictional group wheeling and dealing in secrets in a Sci-Fi video game invented in Edmonton about 10 years ago. Second interesting fact they charge $300 to unlock your ransomed data and I used to charge $300 for data repairs of GL, AR, IC, PR, etc. That said I highly doubt the Shadow Brokers are based out of Edmonton where I live. Version two is out and kill switch wont work The creation of the website http://iuqerfsodp9ifjaposdfjhgosurijfaewrwergwea.com/ which operates as a kill-switch to the ransomware is reported to have been side-stepped by a new version of Wanna Cry. I havent read many articles confirming this but in any respect the SMBv1 and SMBv2 holes should be plugged. People shouldnt rely on the kill-switch working with future Wanna Cry versions or any new malware / ransomware utilizing the loop-hole. If you wonder what the kill-switch website benignly says, it is: sinkhole.tech - where the bots party hard and the researchers harder... Microsoft Conspiracy Theories Those that dont believe in conspiracies can press the back button. The NSA and Microsoft knew this was coming according to this article circulating a petition demanding to know what Microsoft knew, when, where and how. The allegations are based on the timing of Shadow Brokers, NSA getting hacked and MS security updates."} +{"doc_id": 67333, "author": "Scott H", "text": "IntelliJ IDEA is overall a good IDE, its available on Mac, and it supports editing XML in ways such as syntax highlighting, collapsing a given scope, and validating XML. 
I havent used it much for XML, but Ive had good experiences with that IDE for other purposes so thought I would mention it here."} +{"doc_id": 1798, "author": "Michiel de Mare", "text": "In Maps, you can switch to street view by dropping a pin, tapping it, and then tapping the little orange guy."} +{"doc_id": 67339, "author": "Robert Krupp", "text": "Update for 10.10: When you are in the Show View Options Menu, the Snap to Grid setting has been moved to the Sort By: drop down bar."} +{"doc_id": 395022, "author": "Aaronaught", "text": "There are a great many things wrong with circular references: Circular class references create high coupling; both classes must be recompiled every time either of them is changed. Circular assembly references prevent static linking, because B depends on A but A cannot be assembled until B is complete. Circular object references can crash na\u00efve recursive algorithms (such as serializers, visitors and pretty-printers) with stack overflows. The more advanced algorithms will have cycle detection and will merely fail with a more descriptive exception/error message. Circular object references also make dependency injection impossible, significantly reducing the testability of your system. Objects with a very large number of circular references are often God Objects. Even if they are not, they have a tendency to lead to Spaghetti Code. Circular entity references (especially in databases, but also in domain models) prevent the use of non-nullability constraints, which may eventually lead to data corruption or at least inconsistency. Circular references in general are simply confusing and drastically increase the cognitive load when attempting to understand how a program functions. Please, think of the children; avoid circular references whenever you can."} +{"doc_id": 198419, "author": "MIrra", "text": "This works for me: ls -la /dev/disk/by-uuid If you want to check what type the partition is, use: df -Th and it will show you if you have ext3 or ext2. Today it helped me because there was a formatted ext2 partition and I thought it was ext3, which was causing the mount to fail."} +{"doc_id": 395037, "author": "Fishtoaster", "text": "Yes. Your SO profile gives potential employers a look at you actually coding, as well as your ability to solve problems and seek help. If you have any amount of reputation there, Id say put it on. Either a URL alongside your contact info, or a bullet under Misc/Other with your rep."} +{"doc_id": 395040, "author": "Josh K", "text": "No I would not put any websites on my Resume. I will make reference to projects or applications I have done and other information that is readily applicable to the person reviewing the resume. My resume is a one page summary of me, I dont have room for the half dozen SO family accounts, LinkedIn, my blog, Facebook, Twitter, Tumblr, Digg, Reddit, (you get the picutre). Putting I have XXX reputation on stackoverflow.com is just like saying I have 8.3k posts on myfavoriteforum.com. Its not professional. Deliberately pointing people elsewhere for information is also, in my view, wrong. What do you want to gain from adding a link to your LinkedIn profile? Work history? That should be on your resume. People you know? Connections? I guess it would be some indicator of how well you are known in the community, but honestly that information should be found elsewhere. If they care, they can visit my website where I have an Imprint page linking to various online profiles. 
I get more referrals in regards to my blog then I do from my stackoverflow reputation ranking. Im on page one of superuser.com but have never had someone drop me an email because of it. Reputation is great however it is not applicable to your job. If I notice Im writing an extraordinarily good answer to something I will turn it into a blog post. Sometimes Ill even turn it into a full blown application. This gets far more recognition and attention then crawling through your answers. My second highest rated answer on stackoverflow is a XKCD comic. My highest rated answer on programmers is an explanation of a brace. These are not answers Im especially proud of or feel like I should show off."} +{"doc_id": 395041, "author": "luis.espinal", "text": "As I commented on Josh Ks answer (a good one to take into consideration mind you), it depends on the type of content that you put in via your stackoverflow account. You can generalize that to the type of content present on your identifiable internet presence. I would probably not do it, not because it is a bad idea (nor I feel my content is inapropriate), but because Im already including my linkedin profile and tech blog in my CV. Potential employers can then use that to discern what Ive posted on SO, ServerFault or say, cstheory.stackexchange or even slashdot if they so wish. I try to create mostly good content but there are a couple of WTFs here and there. I dont worry about them. If my content however contained a high ratio of flamewards and LOLZ, then I would be worried. I used to have one like that before under a pseudonym (younger, stupider days), but that has been completely replaced with more professional content under my actual name. The older one can still be found if one is really persistent, but then, unless Im applying for, I dunno, a top secret gig or something like that, I would not be happy if an employer goes to that extreme of finding my much older content. Neither would I accept to continue an interview or work with such a company. Probabilistically speaking, I doubt that such a thing is a common occurrence for employers worth working for (except for jobs with valid reasons to perform extensive background checks.) So, it is a personal call whether to include a SOs profile on your CV. It depends on what type of content you have. My personal feeling is that I would prefer to have a well-maintained linkedin profile or tech blog, have it with links to other tech-oriented profiles of yours, and have that (your linkedin profile or a technical blog) included in your CV."} +{"doc_id": 329506, "author": "alpha_989", "text": "If you have vim open, its better to use the vim copy paste to copy text. Tmux copy paste will work, however there are several disadvantages. First, when copying multiple lines of text, you will have to select multiple lines. Tmux copy paste typically doesnt understand that vim has line numbers or there are markings in the vim gutter (such as relative numbers/absolute numbers). As a result, it will copy those as well. Secondly, I have found that if the text has certain language specific formatting, and you directly try to copy paste using tmux to vim, it will mess up the formatting. This can be avoided by using set paste!. However, this requires a lot more work. Use +y to copy and +p to paste Vim natively provides a method to copy paste between the vim buffer and the system clipboard, using either +y or *y. Then pasting using +p or *p. 
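For example (a minimal sketch; it assumes your vim was built with clipboard support, which the first line checks):
vim --version | grep clipboard   # look for +clipboard or +xterm_clipboard
\"+y    (in vim, after a visual selection: yank to the system clipboard)
\"+p    (paste from the system clipboard)
:set clipboard=unnamedplus   (make plain y and p use the + register by default)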
This will avoid copying the relative numbers in the gutter or other random marks which are not relevant to vim"} +{"doc_id": 67363, "author": "brki", "text": "On OS X 10.10 Yosemite, I used this command: sudo launchctl config user path Be aware that this sets the launchctl PATH for all users. This worked well for my use case. Note that you'll be asked to reboot your machine for the effects to take hold. You must restart all applications for this to have effect. (It doesn't apply to applications that are reopened at login after rebooting.) (Thanks @Brecht Machiels.)"} +{"doc_id": 34601, "author": "jwatt", "text": "What does work for me is: echo -n \"This file system is case-\">tmp; echo -n \"in\">>TMP; echo \"sensitive\">>tmp; cat tmp"} +{"doc_id": 493357, "author": null, "text": "When your build server (you are using CI, right?) creates a build, where does it pull from? Sure, an integration build you could argue does not need one true repo, but surely a distribution build (i.e. what you give to the customer) does. In other words: fragmentation. If you designate one repo as the repo and appoint guardians who vet pull requests, you have an easy way to satisfy the request of \u201cgive me a software build\u201d or \u201cI am new to the team, where is the code?\u201d The strength of DVCS is not so much the peer-to-peer aspect of it, but the fact that it is hierarchical. I modify my workspace, then I commit to local. Once I have a feature complete, I merge my commits and push them to my remote. Then anyone can see my tentative code, provide feedback, etc. before I create a pull request and a project admin merges it into the One True repo. With traditional CVCS you either commit or you don't. That is fine for some workflows (I use both VCS types for different projects), but falls flat on its face for a public or OSS project. The key is that DVCS has multiple steps, which are more work but provide a better way to integrate code from strangers through a built-in process that allows better visibility into what is being checked in. Using it in a centralized manner means you can still have that gold standard of the current state of the project while also providing a better code-sharing mechanism."} +{"doc_id": 493360, "author": "Bryan Oakley", "text": "Ultimately, you are building a product. This product represents your code at a single point in time. Given that, your code must coalesce somewhere. The natural point is a CI server or central server from which the product is built, and it makes sense that this central point is a git repository."} +{"doc_id": 395056, "author": "gablin", "text": "Maybe not worth it from a job market perspective, but you might want to have a look at it just to get a feel of how stuff was done in the good ol' days. ^^"} +{"doc_id": 493362, "author": "Michael Hampton", "text": "Ahh, but in fact you are using git in a decentralized manner! Let us compare git's predecessor in mindshare, svn. Subversion had only one repo, one source of truth. When you did a commit, it was to a single, central repo, to which every other developer was committing as well. This sort of worked, but it led to numerous problems, the biggest one being the dreaded merge conflict. These turned out to be anywhere from annoying to nightmarish to resolve. And with one source of truth, they had a nasty habit of bringing everyone's work to a screeching halt until they were resolved.
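The day-to-day difference is visible in the commands themselves (a minimal sketch):
svn commit -m 'change'    # svn: lands in the single central repo immediately
git commit -m 'change'    # git: recorded only in your local repository
git push origin master    # git: publishing is a separate, explicit step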
Merge conflicts certainly exist with git, but they are not work-stopping events and are much easier and faster to resolve; they generally affect only the developers involved with the conflicting changes, rather than everyone. Then there is the whole single-point-of-failure, and the attendant problems that brings. If your central svn repo dies somehow, youre all screwed until it can be restored from backup, and if there were no backups, youre all doubly screwed. But if the central git repo dies, you can restore from backup, or even from one of the other copies of the repo which are on the CI server, developers workstations, etc. You can do this precisely because they are distributed, and each developer has a first-class copy of the repo. On the other hand, since your git repo is a first-class repo in its own right, when you commit, your commits go to your local repo. If you want to share them with others, or to the central source of truth, you must explicitly do this with a push to a remote. Other developers can then pull down those changes when its convenient for them, rather than having to check svn constantly to see if someones done something that will screw them up. The fact that, instead of pushing directly to other developers, you push changes to them indirectly via another remote repo, doesnt matter much. The important part from our perspective is that your local copy of the repo is a repo in its own right. In svn, the central source of truth is enforced by the design of the system. In git, the system doesnt even have this concept; if there is a source of truth, it is decided externally."} +{"doc_id": 493363, "author": "Steve Jessop", "text": "I dont know how you define everyone, but my team has a central repo on a server and also from time to time we pull from other colleagues repos without going via that central repo. When we do this we do still go via a server, because we choose not to email patches about the place, but not via the central repo. This generally happens when a group is collaborating on a particular feature and wants to keep up to date with each other, but as yet has no interest in publishing the feature to everyone. Naturally since we arent secretive silo-workers those situations dont last long, but DVCS provides the flexibility to do whatever is most convenient. We can publish a feature branch or not according to taste. But 90%+ of the time, sure, we go via the central repo. When I dont care about any particular change or particular colleagues work its more convenient, and it scales better, to pull all my colleagues changes that have been vetted in the central repo, rather than separately pulling changes from each of N colleagues. DVCS isnt trying to prevent pull from main repo being the most common workflow, its trying to prevent it being the only available workflow. Distributed means that all repos are technically equivalent as far as the git software is concerned, but it doesnt follow that they all have equal significance as far as developers and our workflows are concerned. When we release to clients or to production servers, the repo we use to do that has a different significance from a repo used only by one developer on their laptop. 
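Mechanically, pulling from a colleague without going via the central repo is just another remote (a sketch; the name and URL are placeholders):
git remote add alice ssh://alice-box/home/alice/project.git
git fetch alice
git merge alice/feature-x    # or: git pull alice feature-x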
If truly decentralized means there are no special repos then I dont think thats what Linus means to champion, given that in point of fact he does maintain special repos that are more important in the grand scheme of things, than is some random clone of Linux that I made yesterday and plan to use only to develop some little patch and then delete it once hes accepted the patch. git doesnt privilege his repo over mine, but Linus does privilege it. His is the current state of Linux, mine isnt. So naturally changes tend to go through Linus. The strength of DVCS over centralized VCS isnt that there must not be a de facto centre, its that changes dont have to go through any centre because (conflicts permitting) anyone can merge anything. DVCS systems are also forced, because they are decentralized, to provide certain convenient features based around the fact that you necessarily must have a complete history (i.e. a repo) locally in order to do anything. But if you think about it theres no fundamental reason why you couldnt configure a centralized VCS with a local cache that keeps the whole history for read-only operations permitted to be out of date (I think Perforce has an option for this mode, but Ive never used Perforce). Or in principle you could configure git with your .git/ directory on a remote-mounted filesystem in order to emulate the feature of SVN that it doesnt work when you dont have a network connection. In effect, DVCS forces the plumbing to be more robust than you can get away with in a centralized VCS. This is a (very welcome) side effect and helped motivate DVCS design, but this distribution of responsibility at the technical level isnt the same as fully decentralizing all human responsibility."} +{"doc_id": 395061, "author": "Chris Knight", "text": "From a personal perspective I would say that there are better things to learn first. However, many large companies have very large investments in their COBOL code base which theyll probably never really be able to leave behind, creating an industry for COBOL programmers to maintain the code base as well as write new code. The company I work for is a large financial company and our technology split for developers is roughly 30% COBOL, 40% Java and 30%C#."} +{"doc_id": 231222, "author": "Torger597", "text": "This was explained in this question: https://superuser.com/questions/22535/what-is-randomart-produced-by-ssh-keygen. It doesnt really have any use for the user generating the key, rather its for ease of validation. Personally. would you rather look at this: (Please note this is a host key example) 2048 1b:b8:c2:f4:7b:b5:44:be:fa:64:d6:eb:e6:2f:b8:fa 192.168.1.84 (RSA) 2048 16:27:ac:a5:76:28:2d:36:63:1b:56:4d:eb:df:a6:48 gist.github.com,207.97.227.243 (RSA) 2048 a2:95:9a:aa:0a:3e:17:f4:ac:96:5b:13:3b:c8:0a:7c 192.168.2.17 (RSA) 2048 16:27:ac:a5:76:28:2d:36:63:1b:56:4d:eb:df:a6:48 github.com,207.97.227.239 (RSA) Which, being a human, itd take you a good while longer to verify, or this: 2048 16:27:ac:a5:76:28:2d:36:63:1b:56:4d:eb:df:a6:48 gist.github.com,207.97.227.243 (RSA) +--[ RSA 2048]----+ | . | | + . | | . B . | | o * + | | X * S | | + O o . . | | . E . o | | . . o | | . . | +-----------------+ 2048 16:27:ac:a5:76:28:2d:36:63:1b:56:4d:eb:df:a6:48 github.com,207.97.227.239 (RSA) +--[ RSA 2048]----+ | . | | + . | | . B . | | o * + | | X * S | | + O o . . | | . E . o | | . . o | | . . 
| +-----------------+ Examples pulled from http://sanscourier.com/blog/2011/08/31/what-the-what-are-ssh-fingerprint-randomarts-and-why-should-i-care/ Essentially, the random art generated by the user's keys can also be used in the same sort of way. If the image generated initially is different from the current image of the key, for example if you had moved a key, then the key has likely been tampered with, corrupted, or replaced. This, from the other question, is a really good read: http://users.ece.cmu.edu/~adrian/projects/validation/validation.pdf"} +{"doc_id": 493367, "author": "try-catch-finally", "text": "Why does everyone use git in a centralized manner? We've never met; how come you say everyone? ;) Secondly, there are other features that you find in Git but not in CVS or SVN. Maybe it's just you assuming that this must be the only feature for everyone. Sure, many people may use it centralized, like CVS or SVN. But don't forget the other feature that inherently comes with a distributed VCS: all copies are more or less complete (all branches and the full history are available) and all branches can be checked out without connecting to a server. In my opinion this is another feature that should not be forgotten. While you're not able to do this with out-of-the-box CVS and SVN, Git can be used centralized like the former ones without any problems. So I'm able to commit my changes, maybe squash work-in-progress commits together, then fetch and rebase my work onto the main development branch. Other features that come out of the box with Git: cryptographically sign commits rebasing (reorder and squash commits; edit commits, not only the message) cherry picking bisecting the history local branches and stashing changes (called shelving in Wikipedia) Also see these three tables in Wikipedia - Comparison of version control software: Features Basic commands Advanced commands So again, maybe the decentralized manner isn't the only feature that makes people use it. Why don't people use a distributed workflow for Git in practice? Anyone contributing to or hosting a bigger project on Bitbucket, GitHub etc. will do exactly that. The maintainers keep the main repository; a contributor clones, commits and then sends a pull request. In companies, even with small projects or teams, a distributed workflow is an option when they either outsource modules or don't want externals to modify the sacred development branch(es) without having their changes reviewed first. Is the ability to work in a distributed manner even important to modern version control, ... As always: it depends on the requirements. Use a decentralized VCS if any point applies: want to commit or navigate the history offline (i.e. finishing the submodule in the mountain cabin during vacation) provide central repos but want to keep the true repository apart to review changes (i.e. for big projects or distributed teams) want to provide (a copy of) the whole history and branches occasionally while preventing direct access to the central repo (similar to the second one) want to version something without having to store that remotely or set up a dedicated repository (especially with Git, a mere git init . would be enough to be ready to version something) There are some more, but four should be enough. ... or does it just sound nice? Of course it sounds nice - for beginners."} +{"doc_id": 591672, "author": "PraveenMax", "text": "Sometimes arp -a -n won't fetch the IP address.
Performing nmap -sP 192.168.1.1/24 will retrieve live hosts and after that if you try arp again, it will show the live hosts. Thats how it worked for me in linux mint. But you can rely on nmap anyday."} +{"doc_id": 67382, "author": "Mikey T.K.", "text": "iStat Menus has a beta going for El Capitan right now - its a paid app, but will definitely do what you want. See link here."} +{"doc_id": 493372, "author": "Tersosauros", "text": "I think youre question comes from an (understandable) always connected mindset. i.e. The central truth ci server is always (or near always) available. While this is true in most environments, I have worked in at least one which was far from this. A Military Simulation project my team worked on several years ago. All the code (Were talking a >US$1b codebase) had to (by law/international agreement, men in dark suits come if you dont) be on machines physically isolated from any Internet connection. This meant the usual situation of we each had 2 PCs, one for writing/running/testing the code, the other to Google things, check E-mail and such. And there was a local network within the team of these machines, obviously not in any way connected to the Internet. The central source of truth was a machine on an army base, in an all-cinderblock underground windowless room (reinforced building, yada-yada). That machine also had no Internet connection. Periodically, it would be someones job to transport (physically) a drive with the git repo (containing all our code changes) to the army base - which was several hundred kilometers away, so, you can imagine. Moreover, in very large systems where you have lots of teams. They will generally each have their own central repo, which then goes back to the actual (god tier) central central repo. I know of at least 1 other contractor who did the same hard-drive git repo dash with their code too. Also, if you consider something on the scale of the Linux kernel... Developers dont just send a pull request to Linus himself. Its essentially a hierarchy of repos - each of which was/is central to someone/some team. The disconnected nature of git means that it can be used in environments where connected model source-control tools (i.e. SVN, for one) couldnt be used - or couldnt be used as easily."} +{"doc_id": 395069, "author": "Rook", "text": "Nooo, of course not. COBOL is a dead language, after all. Or is it? The problem with that view is that programmers at sites like this one usually work with high tech, fast-running (and equally fast burning-out) companies. For them COBOL is a dead language - it is nowhere to be seen. Has not been for some time now, tis true. But COBOL was not meant for them. There is more to the software industry than this. Computers were not invented for people with some irrational need for upgrading and replacing old with new all the time. They were made for business purposes. You want to see COBOL? Go to a company that processes payroll, or handles trucking of goods, or shipping (as in ships), or handles your bank account. There is a huge invisible system of code out there thats practically invisible to the users, and most of them never think about it although they encounter it in one way or another everyday (ATMs?) No, it is not dead. But it is legacy for sure... or is it? Again, depends how you look at it. Nowadays, a lot of people will use Java, C, or anything else instead of COBOL, rewriting from scratch... introducing new bugs as they go along, naturally. That is not saying COBOL doesnt have bugs, and quirks. 
It does, as much as the next language. Of course it does. But in COBOL times, companies which took bugs more seriously than usual (insurance, banks) tended to produce higher quality code with special quality service groups; today, there are deadlines where time and budget always wins over quality. Also, these systems were originally developed for longer periods back then compared to the equivalent now. If some software has been working for 30+ years, where is the incentive to switch? Whole companies went out of business because they ignored the old adage of if it aint broke, dont fix it. Many tried to rewrite the thing... then the first rewrite cost a lot, then the second one cost even more... and none of those new & improved managed to replace it. As I said, this industry is fast-burning, and it also tends to forget fast. In the 70s COBOL was dead or dying soon, C/C++ were going to rule. Then again in the early 80s Pascal was taking over. Then in the 90s it was Java as THE Language... Think of Unisys Mapper, dBase, Clipper, Cold fusion... do people even remember those? Each one of them was going to be the gravedigger for COBOL. Taking that into account, and the fact that it is great for processing high volumes of transactions, batch processing or record/transaction-oriented processing, and that one can compile (without errors) a subroutine written 30 years old as managed COBOL code and call it from a managed COBOL.NET should one wish to go Windows and .NET, Im having trouble finding a suitable replacement for it. (Im also having trouble finding a Microsoft technology that lasted more then a decade.) Yes, new COBOL code is being written today. One just has to know where to look. For those laughing at COBOL, IMHO, it is like laughing at the Egyptian Pyramids, they are there from 5000 years and they still will be there in next 5000 years, while todays hello world housing needing 24 controls to work will be deleted, replaced, forgotten next month. So where are all those COBOL programmers? Ah, for here lies the rub. The thing is that a lot of them dont have any computing science background. A lot of them are not professional programmers (as in university graduates from a CS/SE program). For the most part, they are people in their late 30s-50s, from all areas of expertise, trained entirely by the company specifically for that job. So they arent COBOL programmers - the training they got is specific to the company which so heavily promotes from within. And that makes them pretty much invisible."} +{"doc_id": 395071, "author": "Kludge", "text": "Probably Not Its not an accomplishment but most of all because it puts everything youve ever said under scrutiny. I dont mind telling my peers that Im ignorant about something so I can learn from them but I dont want an employer seeing that. I just cant trust someone in HR to know that someone that can admit they dont know everything is better than someone that thinks they do."} +{"doc_id": 395073, "author": "Andy Lester", "text": "If you want to have a job as a COBOL programmer, then sure, go ahead and learn it. For any other reason, like trying to learn something useful that might help you with modern programming techniques, no, dont bother."} +{"doc_id": 395075, "author": null, "text": "I havent yet put my profile on my CV, mostly because I havent thought of it while updating. It seems though that its consistently quite high in google searches on my name, so people in the know will have no problem being led towards it. PS. 
I just created my stackexchange.com profile; I have had my stackoverflow.com profile for a few years."} +{"doc_id": 34629, "author": "Henrik B.", "text": "On my (Danish) keyboard layout (under 10.7.5) it is fn + shift + \u2191/\u2193."} +{"doc_id": 165710, "author": "Keith Taylor", "text": "I found a free app, Calendar Event Reminder, that achieves the requirement at https://play.google.com/store/apps/details?id=sk.mildev84.reminder"} +{"doc_id": 264039, "author": "brablc", "text": "Easy detection of whether a command is available: CMD=gzip; if hash bzip2; then CMD=$_; fi"} +{"doc_id": 493421, "author": "Cort Ammon", "text": "Business logic rewards a centralized server. For nearly all realistic business scenarios, a centralized server is a fundamental feature of the workflow. Just because you have the capacity to do DVCS doesn't mean your primary workflow has to be DVCS. When I use git at work, we use it in a centralized manner, except for those strange odd cases where the distributed bit was essential to keeping things moving along. The distributed side of things is complicated. Typically you want to keep things smooth and easy. However, by using git you ensure that you have access to the distributed side to deal with the gnarly situations that may arise down the road."} +{"doc_id": 132975, "author": "Eric Cloninger", "text": "Just looking around my desk, I see at least 10 phones running Android from 3 manufacturers. And a XOOM. Nearly all of them are activated with my primary gmail account for the sake of getting them going and doing some testing. I also connect some of these devices to my work Google Apps account. Android does exactly the right thing with regards to syncing of data. Email, contacts and calendar work flawlessly. Apps that I purchased on one device show up in the market app for those devices that are compatible, but it doesn't download them until I say to do so. Books I bought are available in my book reader from one device to the next. Apps that I downloaded or paid for but don't want to use on a particular device show up in the market, but I'm free to ignore them and not download them. When a new device arrives, usually it's a matter of 20-30 minutes to get it to a useful state by downloading the core apps and data that I need. I'm sure that could be shorter if I bothered to set up an install routine, but half the fun of getting a new toy is figuring out how it feels in my hands. The new web-based Market interface makes this much easier. I just sync once and it shows up as another device in my Market account. Then I start pushing apps at it."} +{"doc_id": 362352, "author": "wiem fourati", "text": "sort -t : -k 3 filename, when your delimiter is : and you need to sort the file filename by the 3rd field."} +{"doc_id": 165748, "author": "janot", "text": "There's also the Smali Patcher program for Windows, which creates a custom Magisk module for your device and has an option to disable FLAG_SECURE system-wide."} +{"doc_id": 427894, "author": "Brian Knoblauch", "text": "No, it's not engineering. We're not that scientific, and we don't have to pass any of those state engineering tests. In fact, it's illegal to call yourself a software engineer in some places due to that lack of testing."} +{"doc_id": 427895, "author": "Thomas Owens", "text": "Is software development engineering? If no, what are the things that it lacks in order to be qualified thus? Yes, software engineering is an engineering discipline.
Wikipedia defines engineering as the application of mathematics, as well as scientific, economic, social, and practical knowledge in order to invent, innovate, design, build, maintain, research, and improve structures, machines, tools, systems, components, materials, processes, solutions, and organizations. The result of software engineering is a software system that can improve the lives of people, and it can involve some combination of scientific, mathematical, economic, social, or practical knowledge. In terms of how its viewed, academically and professionally, it varies. Software engineering programs can be accredited by ABET as engineering programs. Software engineers can be members of the IEEE. Some companies consider software engineering to be an engineering discipline, while others dont - its a toss up, really. The best book on this subject is Steve McConnells Professional Software Development: Shorter Schedules, Higher Quality Products, More Successful Projects, Enhanced Careers. It looks at software engineering as a profession, evolution from a craft to a profession, the science of software development, the difference between software engineering and software engineering (applying engineering practices to software versus engineers who happen to build software, with a case study that includes my alma mater), certification and licensing, and ethics. Glenn Vanderburg has a series of talks called Real Software Engineering that has has given between 2010 and 2015 at a number of conferences, along with two related talks, Craft, Engineering, and the Essence of Programming (given in 2011 as a keynote at RailsConf) and Craft and Software Engineering (given in 2011 at QCon London). I think these talks are a pretty comprehensive argument for why software engineering is an engineering discipline. One argument, which Vanderburg brings up briefly in his talks, is the one made by Jack W. Reeves in 1992 (and revisited again in 2005) on what software design is and how code is the output of software engineering design activities (this is also discussed on the C2 wiki). Once you get away from older schools of thought where specification and modeling is software design and into code being software design, some of the relationships between software engineering and other engineering disciplines become more readily apparent. Some differences and the reasons for those differences become even more apparent after you see that economics of software development are vastly different than many other disciplines - construction is cheap (almost free, in many cases), while design is the expensive portion. Is that [CMMI] something that will turn development into engineering? No. CMMI is a process improvement framework that provides guidance to organizations on what kinds of activities are useful when building software. Engineering disciplines typically have an engineering process. Having such a process is important for the successful completion of high quality projects. That said, the CMMI (or any other process framework or methodology) is just a single tool - using it wont make you magically advance from a developer to an engineer. However, not following some kind of process is, in my opinion, a sign of a project that is not an engineering project. Also, what is your opinion on the software engineering courses/certificates? Its only as much value as other people put into it. There are useful courses and there are useless courses. 
There are valuable certificates, and certificates that arent worth the paper they are printed on. There are a lot of factors, from who is endorsing or accrediting the course or who is issuing the certificate to your current industry of employment to your current job and where you want to go."} +{"doc_id": 67448, "author": "AsTeR", "text": "BetterTouchTool: free and does the job plus a lot of extra ;) I use it in combination with ShiftIt for window resizing."} +{"doc_id": 296825, "author": "Stefanos Chrs", "text": "Another difference I found out today is when sorting based on a delimeter where sort -u applies the unique flag only on the column that you sort with. $ cat input.csv 3,World,1 1,Hello,1 2,Hello,1 $ cat input.csv | sort -t, -k2 -u 1,Hello,1 3,World,1 $ cat input.csv | sort -t, -k2 | uniq 1,Hello,1 2,Hello,1 3,World,1"} +{"doc_id": 427900, "author": null, "text": "Im going to go with No here. My brother is a mechanical engineer, and he describes engineering as The Art of Being Cheap: Engineers are more concerned with getting things done as fast as possible, at the lowest cost possible, with the fewest materials possible. In reaction, Ive come to describe software development (not software engineering - they really are fundamentally two distinct fields) as The Art of Being Efficient: Developers are more concerned with getting things done as fast as possible, at the lowest cost possible, with the least amount of repetition possible. The difference is in the last part of those sentences."} +{"doc_id": 427901, "author": null, "text": "Is software development engineering? No. Being an engineer means your project follows a cause-and-effect timeline - you follow the building codes, therefore your building doesnt fall down (or at least you cant be blamed if it does). Writing software, you can follow all the guidelines going (and theres so many different ones to choose from!) and it still might hang/crash/give wrong answers (unless youre involved in the remarkably small field of writing provable programs in side-effectless functional languages)."} +{"doc_id": 427903, "author": null, "text": "Coming from a typical engineering background, but making a career in software development, I see large similarities between both worlds. Apart maybe from the exact definition of engineering, I see in practice that developing software is not that different from developing a physical product. At least I think it should not be very different. Whether you design an aircraft or a software application, for both you need to: make designs define subsystems and components make prototypes specify and execute tests etc. I read somewhere in an other answer that designing software is different because you do not design everything before you start programming. Well actually to a lesser extent that is also the case when you design a physical product. Designing and prototyping and testing is an iterative process. Also when software projects grow in size it gets more important to define clear subsystems, components and interfaces which is also similar to designing complex products such as an aircraft. That is why I consider developing software to be engineering."} +{"doc_id": 165773, "author": "ffonz", "text": "I know this is an even later answer, but it is worth mentioning it. No rooting needed! No app installation needed!* Which is not even possible if you do not have some other internet connection. There is a project called gnirehtet. 
Install adb onto the host PC (Windows/Linux/Mac). Download the gnirehtet zip file to the host. Unzip it. Run the command. That's it! For more information, read the readme file of gnirehtet on their website. *=Behind the scenes it will install an apk file onto your device via the USB connection."} +{"doc_id": 231312, "author": "mpontillo", "text": "Here's a variation of Alex's answer. I only care about minutes and seconds, but I also wanted it formatted differently. So I did this: start=$(date +%s) end=$(date +%s) runtime=$(python -c \"print '%u:%02u' % ((${end} - ${start})/60, (${end} - ${start})%60)\")"} +{"doc_id": 329619, "author": "G-Man Says 'Reinstate Monica'", "text": "pcregrep has a smarter -o option that lets you choose which capturing groups you want output. So, using your example file, $ pcregrep -o1 'foobar (\\w+)' test.txt bash happy"} +{"doc_id": 427931, "author": "guillaume31", "text": "I wouldn't consider the term engineering the most appropriate to describe software development, for 2 main reasons: It conveys a lot of old ideas, concepts and so-called golden rules originating in traditional engineering disciplines such as industrial, civil, naval, or mechanical engineering. I'm talking about rules in labour division, production processes, quality standards... These most often only marginally apply to software. It fails to describe in a satisfying way what programming has that other disciplines do not (and I believe it has a lot more, and a lot that is different), and what new challenges developers have to face on a day-to-day basis compared to their counterparts in traditional engineering domains. Software's virtual and immaterial nature plays a huge role in that. Software development has long been seen as just another engineering discipline. Considering the failure rates of software projects we have known ever since they were measured, it's high time we recognized development as an entirely new animal, code as a really special material and the application lifecycle as a totally different kind of production cycle, and stop desperately trying to apply old recipes to them."} +{"doc_id": 624544, "author": "Ron Maupin", "text": "MAC address filtering itself does not provide much protection. As you pointed out, a MAC address can be cloned. That doesn't mean it can't be part of the overall defense strategy, but it can be a lot of work for very little return. You need a comprehensive security policy which can include such things as: Physical access limitations 802.1X, as @robut mentioned, albeit this can be complex and require supporting hardware/software infrastructure, while frustrating legitimate users Port security on switches can be set up to only allow a single (or limited number of) MAC address at any given time, or in any given time period, to prevent connection of hubs, switches, APs, etc., including a port disable for a given time period if violations are detected (care needs to be taken for things like VoIP phones where PCs are connected to the phone, since the phone itself will have one or more MAC addresses) You could also implement a policy that requires any switch ports that are not currently used to be disabled (including, perhaps, making sure that unused network cables are not cross-connected in the data closet) As a locksmith friend of mine once told me, \u201cLocks only keep honest people honest.\u201d The bad guys will always find a way; your job is to make it not worth their efforts. If you provide enough layers of protection, only the most determined bad guys will spend the time and effort.
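To make the cloning point from the top of this answer concrete (an illustrative sketch; the interface name and address are placeholders), impersonating an allowed MAC on a Linux box is three commands:
ip link set dev eth0 down
ip link set dev eth0 address 00:11:22:33:44:55   # any address already on the allowed list
ip link set dev eth0 up
This is why MAC filtering on its own mostly keeps honest people honest.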
You have to weigh the risks with the resources (primarily time and money, but lost productivity, too) that you are willing to put into securing your network. It may not make much sense to spend thousands of dollars and many man hours to protect that garage-sale bicycle you bought for $10. You need to come up with a plan and decide how much risk you can tolerate."} +{"doc_id": 624546, "author": "Thomas Connard", "text": "Use a VPN internally and treat the section of the network outside secure areas the same way you would treat the internet."} +{"doc_id": 264101, "author": "Enno", "text": "Add BASH_ENV=/home/user/.profile to the crontab. See How to guarantee availability of $BASH_ENV"} +{"doc_id": 624554, "author": "PHoBwz", "text": "Answer to your question = No. I dont think there is one complete answer. The closest would be to have defence in depth. Start as Ron Maupin suggested as having Physical access restricted. Then have 802.1x using EAP-TLS to have authentication to the port. After that you can still have a firewall on the access/ditribution layer. If you are talking more about internal web systems then make sure everyone is authenticated through a proxy also."} +{"doc_id": 67500, "author": "mklement0", "text": "To complement the existing, helpful answers: The accepted answer works in principle, but: relies on extended attributes of the HFS+ filesystem, which are lost when copying the bundle to a filesystem that doesnt support them. For instance, you cannot store a custom icon in a Git repository. the icon displayed by the bundle itself, such as when showing an alert, is still the original icon. percent 20s answer is more comprehensive and portable in principle, but comes with two caveats: Any changes to AutomatorApplet.icns are lost whenever you modify and re-save the bundle in Automator. Thus, for instance, youd need a script to put the updated icons in place programmatically every time the bundle is saved. As others have noted, there are icon caching issues, which can be tricky to resolve; as of OSX 10.10.4: Even a reboot doesnt make the new icons appear in Finder (though it does appear in other contexts such as in the Dock), but there are two workarounds: either (a) move the bundle to a different folder, or (b) rename the bundle; given that reverting to the original path and name makes the problem reappear, youd have to plan ahead: create your bundle in a different location or with a different name, then move / rename to the desired location / name. Alternatively, you can use a tool such as OnyX and check IconServicesunder Cleaning > User to clear the cache."} +{"doc_id": 100269, "author": "Tonyo Madem", "text": "HyperSwitch Does the trick : https://bahoom.com/hyperswitch I tested it. It needs to be updated. Its free. When I Cmd+Tab on minimized app its open!"} +{"doc_id": 559023, "author": "mricon", "text": "As long as you verify the certificate validity, this is perfectly fine and is done all the time."} +{"doc_id": 34735, "author": "Tom Crockett", "text": "In Mac OS 10.8 and above you can send yourself Notification Center messages. Theres a tool called terminal-notifier that you can download or install using Homebrew or Rubygems, e.g.: brew install terminal-notifier Check out https://github.com/julienXX/terminal-notifier for more on this tool. To simplify the common use-case of just caring about the fact of something in the terminal being done, add an alias to your .bash_profile: alias notifyDone=terminal-notifier -title Terminal -message Done with task! Exit status: $? 
-activate com.apple.Terminal Or, incorporating Austin Lucas answer, you can add a sound and icon badge with tput bel: alias notifyDone=tput bel; terminal-notifier -title Terminal -message Done with task! Exit status: $? -activate com.apple.Terminal Then you can simply do: $ ; notifyDone Once the long-running task finishes youll get a nice modal popup: When you click the notification, it will activate Terminal. If you go to System Preferences > Notifications > terminal-notifier and change the alert style to Alerts, the notification will persist until you dismiss it."} +{"doc_id": 559026, "author": "AJ Henderson", "text": "Yes, this is the standard practice. Doing anything other than this offers minimal additional advantage, if any (and in some cases may harm the security). As long as you verify a valid SSL connection to the correct server, then the password is protected on the wire and can only be read by the server. You dont gain anything by disguising the password before sending it as the server can not trust the client. The only way that the information could get lost anyway is if the SSL connection was compromised and if the SSL connection was somehow compromised, the disguised token would still be all that is needed to access the account, so it does no good to protect the password further. (It does arguably provide a slight protection if they have used the same password on multiple accounts, but if they are doing that, they arent particularly security conscious to begin with.) As MyFreeWeb pointed out, there are also some elaborate systems that can use a challenge response to ensure that the password is held by the client, but these are really elaborate and not widely used at all. They also still dont provide a whole lot of added advantage as they only protect the password from being compromised on an actively hacked server."} +{"doc_id": 34738, "author": "Indolering", "text": "I found Coccinellida, it works on Lion but its new and a bit buggy : /"} +{"doc_id": 133045, "author": "markijbema", "text": "I stumbled upon this question because I want to recharge my phone at night next to my bed. Therefore, I do not want it to wake me, but setting the phone to silent does not solve it, as I want to be reachable for emergency calls. My solution (which isnt strictly a correct answer to the question, but might solve the problem for most people) was using the app Night Ringer Free, which enables me to enter a whitelist for calls/sms, however, all other sounds are silenced."} +{"doc_id": 493495, "author": "Theodore Norvell", "text": "Complexity: With a central repository, a typical work flow might be branch off from the central master branch change the code test possibly go back to changing the code commit merge any new changes from the central master branch test possibly go back to changing the code merge changes into the central master branch and push The complexity with respect to the number of developers in O(1). If instead each developer has their own master branch it becomes, for developer 0: branch off from master branch 0 merge from master branch 1 ... merge from master branch N-1 change the code test possibly go back to changing the code commit merge any changes from master branch 0 merge any changes from master branch 1 ... merge any changes from master branch N-1 test possibly go back to changing the code merge changes into master branch 0 The peer-to-peer approach is O(N). Consistency: Now consider if there is a merge conflict between Alices master branch and Bobs master branch. 
Each of the N developers could resolve the conflict differently. Result: chaos. There are ways of achieving eventual consistency, but until that happens, all sorts of developer time can be wasted."} +{"doc_id": 559031, "author": "Neil McGuigan", "text": "Not necessarily. You also need to ensure the following: Your site is protected against cross-site request forgeries. Use Synchronizing Token Pattern. Your site is protected against session fixation attacks. Change session id on login. If using session cookies, that your entire site is HTTPS, not just the login URL, and that your session cookie is marked as secure and http only (no JavaScript access). Browser will send session cookie unencrypted if user types http://yoursecuresite (in same browser session). You are using a recent protocol. SSL 1 and 2 are broken, and 3 might be too. Try to use TLS 1.3. You are using a strong cipher. You are not using HTTP compression (GZip) or TLS compression. If your site displays user input (like a search input), then I can figure out your CSRF tokens and bank account number if youre using compression. Your server does not allow insecure client re-negotiation. You are using a 2048-bit RSA key (or the equivalent for an EC key), and that no one else knows your private key. You are using HSTS so browser goes direct to https even if user types http You are using perfect forward secrecy so your historical communications are secure even if your private key is leaked"} +{"doc_id": 362425, "author": "user380458", "text": "You might have a look at Fitus/Zaloha.sh. It is a synchronizer implemented as a bash shell script that uses only standard Unix commands. It is easy to use: $ Zaloha.sh --sourceDir=test_source --backupDir=test_backup"} +{"doc_id": 133050, "author": "Ryan Conrad", "text": "There a a lot of apps that will just use GPS if it is on. even the camera will use it to put location data in the image. google uses it for buzz, maps, latitude, etc. And if you have latitude, it will fire up every once in a while to report your location (which if you have latitude, you told it to do :)). there are some applications that will even use the GPS, if its available, to show you local advertisements. If you optd in for the providing google with location information when you set up your device, the device itself will use it to notify google about wifi locations and cell towers around your location. This data helps start GPS from a cold start, so it can lock on to a location quicker. This data is good, and helps people that actually use the GPS. They are not tracking your every move like some people seem to think. Google, and Apple for that matter, couldnt care less where you are, what they want to know is where the cell towers are so they can tell you where you are with their location applications more quickly. If you are worried about what may be sending data, then the only safe option is to turn off GPS unless you are using it, and check the permissions of applications when you install them. If your Cooking Recipes application wants to use GPS, that you may want to worry a little, unless it is going to tell you how to get to the store to by the ingredients."} +{"doc_id": 100286, "author": "Heider Sati", "text": "I fixed it on mine, it was to do with the BELLMIN Time Machine Queue Backup files (i.e. files that meant to go onto the Time Machine but havent been moved yet due to the TM not reachable or away from home etc). 
Do the following (no need to buy or download any additional tools): Go to Terminal List all TM objects by doing the following sudo tmutil listlocalsnapshots / It will come back with few items: com.apple.TimeMachine.2018-04-01-122047 com.apple.TimeMachine.2018-04-01-183626 Remove these by doing: sudo tmutil deletelocalsnapshots 2018-04-01-122047 sudo tmutil deletelocalsnapshots 2018-04-01-183626 Where the date is taken from the output from step-3 above. Leave it for a while to catch its breath. You can check the disk space by executing the df -m command and notice the % of your Free Space, doing more df -m every few seconds will show an increase, once done it would stabilise."} +{"doc_id": 34751, "author": "Mathias Bynens", "text": "The keyboard shortcut you\u2019re looking for is \u2318 + \u2325 + \u238b, alternatively known as command + option + escape. This will bring up the Force Quit Applications window (see screenshot below)."} +{"doc_id": 559041, "author": "Andy Boura", "text": "As others have said this is a standard approach. However for a personal site I wouldnt necessarily follow it... I would use federated login from Facebook, Google or similar as that way I dont have to handle account life-cycle issues, and can use Google 2 factor Auth etc. It saves having quite a few forms and fields in your database which means less to go wrong. Of course you would still need to authorise those users you wish to be able to access either through a function of the authentication provider such as a Facebook group, some sort of whitelisting of allowed users, or an approval work flow off your account. Sometimes this is done by inviting users: giving them a URL containing a unique secure code and the your system linking that to an Auth provider on first login. Alternatively users authneticate and request access. This places them in a pending state. You then provide an interface where you can login and approve them."} +{"doc_id": 1986, "author": null, "text": "You can use FileMerge, Apples diff solution. Its free and it comes with every Mac OS X install. The only downside is that you have to install the Developer Tools. You can find them on your DVD install that came when you bought your Mac (Snow Leopard or earlier). You can also get the developer tools from the App Store if your version of the OS supports that. Then, you can find it at /Developer/Applications/Utilities/FileMerge.app"} +{"doc_id": 1987, "author": "Michael H.", "text": "Agree with the recommendation for FileMerge.app. You also have the free, cross-platform DiffMerge program, but I like FileMerge better."} +{"doc_id": 165829, "author": "raddevus", "text": "You can also try uploading the APK to the free VirusTotal.com virus checker. It will generate a unique hash for the APK and if others have already uploaded it to test it (very likely) then you will have more of an idea that the APK is valid. The service will also scan the APK with over 60 virus scanners and let you know if it believes it has similar signatures to viruses already found."} +{"doc_id": 100299, "author": "Henry DeYoung", "text": "I had the same problem with the update to 10.13.4. I was able to boot to Safe Mode (hold Shift during power up) and then restart normally into 10.13.3. For now Im happy with 10.13.3 and I hope that the update will be patched in the near future. 
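A hedged sketch of driving the same snapshot cleanup from Python with subprocess. It assumes macOS with tmutil on the PATH, that deletion may require sudo, and that snapshot names follow the com.apple.TimeMachine.<date> pattern shown above:

```python
import subprocess

# List local Time Machine snapshots (macOS only; tmutil may need sudo).
listing = subprocess.run(
    ["tmutil", "listlocalsnapshots", "/"],
    capture_output=True, text=True, check=True,
).stdout

for line in listing.splitlines():
    # Expected form (per the answer above): com.apple.TimeMachine.2018-04-01-122047
    if line.startswith("com.apple.TimeMachine."):
        stamp = line.split("com.apple.TimeMachine.", 1)[1]
        subprocess.run(["tmutil", "deletelocalsnapshots", stamp], check=True)
```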
I hope this might help."} +{"doc_id": 100300, "author": "Steve Chambers", "text": "In the past I have used a procedure for upgrading macOS versions that I had good successes with, especially when discussion groups and news are seeing problematic installs: using a system cleaner. EG using something like Onyx before installing, then a reboot and proceed with the install. Yah, too late now for you but something to consider for the next time. If the restarting in Safe mode doesnt do the trick (it may...) If I were you I would use Recovery Mode and restore from a Time Machine backup. If I remember correctly you have the ability to restore from a specific point in time."} +{"doc_id": 264139, "author": "Juan", "text": "I had the same problem last week and I tried a lot of programs, like debugfs, photorec, ext3grep and extundelete. ext3grep was the best program to recover files. The syntax is very easy: ext3grep image.img --restore-all or: ext3grep /dev/sda3 --restore-all --after `date -d 2015-01-01 00:00:00 +%s` --before `date -d 2015-01-02 00:00:00 +%s` This video is a mini tutorial that can help you."} +{"doc_id": 2003, "author": "Kyle Cronin", "text": "You can do page up/down and home/end on a Macbook keyboard by using the fn and the arrow keys: fn+\u2191 is PageUp fn+\u2193 is PageDown fn+\u2190 is Home fn+\u2192 is End"} +{"doc_id": 100307, "author": "gregseth", "text": "You can also use Better Touch Tool to override the shortcuts (globally or for any specific application). Given I was already using this app, its been the solution for me since none of the other answers mentioned here worked for me (High Sierra), and I didnt want to install new software."} +{"doc_id": 2008, "author": "Robert S Ciaccio", "text": "For Eclipse it appears that these keys arent even set to anything by default, at least in my installation. You probably need to set them in the preferences: I dont know exactly where the end and home Windows-like functions are in this screen, I couldnt find anything yet that sounds like what they do. Something like move cursor to end\\beginning of line was what I was looking for. If youre talking about Safaris address bar, try \u2318+\u2192 and \u2318+\u2190 for end and home. Those work in lots of other apps as well."} +{"doc_id": 493529, "author": "Agent_L", "text": "Simple: Companies are centralized organizations, with centralized workflow. Every programmer has a boss and he has his boss, etc up to CTO. CTO is the ultimate source of technical truth. Whatever tool company uses, it must reflect this chain of command. A company is like an army - you cant let privates outvote a general. GIT offers features that are useful to the companies (eg. pull requests for code review) and that alone makes them switch to GIT. The decentralized part is simply a feature they dont need - so they ignore it. To answer your question: The distributed part is indeed superior in distributed environment, eg open-source. Results vary depending on whos talking. Linus Torvalds is not exactly your cubicle rat, thats why different features of GIT are important to him than to your github-centric company."} +{"doc_id": 100314, "author": "Brian Raymes", "text": "I had the same problem (15-inch, Late 2016 MacBook Pro). I was able to fix my issue without losing data by running First Aid (from Disk Utility) when the laptop was in the crashed state. 
Once First Aid completed the pass on the drive, I simply rebooted and the OS loaded successfully."} +{"doc_id": 34783, "author": "ghoppe", "text": "A modern, powerful, but paid (currently Mar-2013 $69.99) file merging application for OS X is Kaleidoscope. It handles folders, files, and even images. Ad copy from the page: Compare text in Blocks, Fluid and Unified layouts in both Two-Way and Three-Way modes. Quickly navigate and search through the most readable diff youve ever seen."} +{"doc_id": 100319, "author": "Swisher Sweet", "text": "Fortunately I was able to simply reinstall macOS High Sierra by booting into Recovery Mode (Command \u2318 + R) and choosing Reinstall macOS High Sierra. I did not erase my drive first since system volume was intact (no errors after running Disk Utility). I simply reinstalled the OS and I was able to log back in with all my data, settings, and apps still there just like before. Also, this installed the 10.3.4 version of macOS High Sierra, so I dont have to go through the update process again."} +{"doc_id": 198628, "author": "user435943", "text": "locate uses a prebuilt database, which should be regularly updated, while find iterates over a filesystem to locate files. Thus, locate is much faster than find, but can be inaccurate if the database -can be seen as a cache- is not updated (see updatedb command). Also, find can offer more granularity, as you can filter files by every attribute of it, while locate uses a pattern matched against file names."} +{"doc_id": 198629, "author": "Warren Young", "text": "locate(1) has only one big advantage over find(1): speed. find(1), though, has many advantages over locate(1): find(1) is primordial, going back to the very first version of AT&T Unix. You will even find it in cut-down embedded Linuxes via Busybox. It is all but universal. locate(1) is much younger than find(1). The earliest ancestor of locate(1) didnt appear until 1983, and it wasnt widely available as locate until 1994, when it was adopted into GNU findutils and into 4.4BSD. locate(1) is also nonstandard, thus it is not installed by default everywhere. Some POSIX type OSes dont even offer it as an option, and where it is available, the implementation may be lacking features you want because there is no independent standard specifying the minimum feature set that must be available. There is a de facto standard, being BSD locate(1), but that is only because the other two main flavors of locate implement all of its options: -0, -c, -d, -i, -l, -m, -s, and -S. mlocate implements 6 additional options not in BSD locate: -b, -e, -P, -q, --regex and -w. GNU locate implements those six plus another four: -A, -D, -E, and -p. (Im ignoring aliases and minor differences like -? vs -h vs --help.) The BSDs and Mac OS X ship BSD locate. Most Linuxes ship GNU locate, but Red Hat Linuxes and Arch ship mlocate instead. Debian doesnt install either in its base install, but offers both versions in its default package repositories; if both are installed at once, locate runs mlocate. Oracle has been shipping mlocate in Solaris since 11.2, released in December 2014. Prior to that, locate was not installed by default on Solaris. (Presumably, this was done to reduce Solaris command incompatibility with Oracle Linux, which is based on Red Hat Enterprise Linux, which also uses mlocate.) IBM AIX still doesnt ship any version of locate, at least as of AIX 7.2, unless you install GNU findutils from the AIX Toolbox for Linux Applications. 
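The locate-versus-find tradeoff described in these answers (a prebuilt index versus a live filesystem walk) can be sketched in a few lines of Python; this is illustrative only, as the real updatedb and locate do far more:

```python
import fnmatch
import os

def updatedb(root):
    """Build a locate-style index: one expensive pass now, fast lookups later."""
    return [os.path.join(dirpath, name)
            for dirpath, _dirs, files in os.walk(root)
            for name in files]

def locate(index, pattern):
    """Fast, but only as fresh as the last updatedb() run (the stale-cache problem)."""
    return [p for p in index if fnmatch.fnmatch(os.path.basename(p), pattern)]

def find(root, pattern):
    """Always current, but pays for a full walk on every call."""
    return [os.path.join(dirpath, name)
            for dirpath, _dirs, files in os.walk(root)
            for name in files
            if fnmatch.fnmatch(name, pattern)]

index = updatedb(".")
print(locate(index, "*.txt"))  # instant, possibly stale
print(find(".", "*.txt"))      # fresh, costs a full walk
```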
HP-UX also appears to lack locate in the base system. Older real Unixes generally did not include an implementation of locate. find(1) has a powerful expression syntax, with many functions, Boolean operators, etc. find(1) can select files by more than just name. It can select by: age size owner file type timestamp permissions depth within the subtree... When finding files by name, you can search using file globbing syntax in all versions of find(1), or in GNU or BSD versions, using regular expressions. Current versions of locate(1) accept glob patterns as find does, but BSD locate doesnt do regexes at all. If youre like me and have to use a variety of machine types, you find yourself preferring grep filtering to developing a dependence on -r or --regex. locate needs strong filtering more than find does because... find(1) doesnt necessarily search the entire filesystem. You typically point it at a subdirectory, a parent containing all the files you want it to operate on. The typical behavior for a locate(1) implementation is to spew up all files matching your pattern, leaving it to grep filtering and such to cut its eruption down to size. (Evil tip: locate / will probably get you a list of all files on the system!) There are variants of locate(1) like slocate(1) which restrict output based on user permissions, but this is not the default version of locate in any major operating system. find(1) can do things to files it finds, in addition to just finding them. The most powerful and widely supported such operator is -exec, but there are others. In recent GNU and BSD find implementations, for example, you have the -delete and -execdir operators. find(1) runs in real time, so its output is always up to date. Because locate(1) relies on a database updated hours or days in the past, its output can be outdated. (This is the stale cache problem.) This coin has two sides: locate can name files that no longer exist. GNU locate and mlocate have the -e flag to make it check for file existence before printing out the name of each file it discovered in the past, but this eats away some of the locate speed advantage, and isnt available in BSD locate besides. locate will fail to name files that were created since the last database update. You learn to be somewhat distrustful of locate output, knowing it may be wrong. There are ways to solve this problem, but I am not aware of any implementation in widespread use. For example, there is rlocate, but it appears to not work against any modern Linux kernel. find(1) never has any more privilege than the user running it. Because locate provides a global service to all users on a system, it wants to have its updatedb process run as root so it can see the entire filesystem. This leads to a choice of security problems: Run updatedb as root, but make its output file world-readable so locate can run without special privileges. This effectively exposes the names of all files in the system to all users. This may be enough of a security breach to cause a real problem. BSD locate is configured this way on Mac OS X and FreeBSD. Write the database as readable only by root, and make locate setuid root so it can read the database. This means locate effectively has to reimplement the OSs permission system so it doesnt show you files you cant normally see. It also increases the attack surface of your system, specifically risking a root escalation attack. 
Create a special locate user or group to own the database file, and mark the locate binary as setuid/setgid for that user/group so it can read the database. This doesnt prevent privilege escalation attacks by itself, but it greatly mitigates the damage one could cause. mlocate is configured this way on Red Hat Enterprise Linux. You still have a problem, though, because if you can use a debugger on locate or cause it to dump core you can get at privileged parts of the database. I dont see a way to create a truly secure locate command, short of running it separately for each user on the system, which negates much of its advantage over find(1). Bottom line, both are very useful. locate(1) is better when youre just trying to find a particular file by name, which you know exists, but you just dont remember where it is exactly. find(1) is better when you have a focused area to examine, or when you need any of its many advantages."} +{"doc_id": 395247, "author": "Matthieu M.", "text": "Null references are a mistake because they allow non-sensical code: foo = null foo.bar() There are alternatives, if you leverage the type system: Maybe foo = null foo.bar() // error{Maybe does not have any bar method} The generally idea is to put the variable in a box, and the only thing you can do is unboxing it, preferably enlisting the compiler help like proposed for Eiffel. Haskell has it from scratch (Maybe), in C++ you can leverage boost::optional but you can still get undefined behaviour..."} +{"doc_id": 2037, "author": "mipadi", "text": "\u2318+\u2192 works like a PCs End (moves the cursor to the end of the line). \u2318+\u2190 works like a PCs Home (moves to the beginning of the line). ctrl+A and ctrl+E (Emacs-style keybindings) work in most OS X applications as well."} +{"doc_id": 198645, "author": "Russell Borogove", "text": "find is not possible for a novice or occasional user of Unix to successfully use without careful perusal of the man page. Historically, some versions of find didnt even default the -print option, adding to the user-hostility. locate is less flexible, but far more intuitive to use in the common case."} +{"doc_id": 231429, "author": "ctrl-alt-delor", "text": "bash keeps it in working memory, bash can be configured to save it when bash closes or after each command, and to be loaded when bash starts or on request. If you configure to save after each command, then consider the implications of having multiple bash running at same time. (command lines will be interleaved)"} +{"doc_id": 395270, "author": "Jonas", "text": "null is evil There is a presentation on InfoQ on this topic: Null References: The Billion Dollar Mistake by Tony Hoare Option type The alternative from functional programming is using an Option type, that can contain SOME value or NONE. A good article The \u201cOption Pattern that discuss the Option type and provide an implementation of it for Java. I have also found a bug-report for Java about this issue: Add Nice Option types to Java to prevent NullPointerExceptions. The requested feature was introduced in Java 8."} +{"doc_id": 231430, "author": "Michael Homer", "text": "Bash maintains the list of commands internally in memory while its running. 
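A rough Python analogue of the Maybe/Option idea from the answers above, using Optional in the signature so the no-value case is explicit instead of a surprise null (names are illustrative):

```python
from typing import Optional

def find_user(user_id: int) -> Optional[str]:
    """Returns the user's name, or None; the signature makes the
    'no result' case explicit, unlike a bare null reference."""
    users = {1: "alice", 2: "bob"}
    return users.get(user_id)

name = find_user(3)
if name is None:          # the caller is forced to unbox before use
    print("no such user")
else:
    print(name.upper())   # safe: name is a str on this branch
```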
They are written into .bash_history on exit: When an interactive shell exits, the last $HISTSIZE lines are copied from the history list to the file named by $HISTFILE If you want to force the command history to be written out, you can use the history -a command, which will: Append the new history lines (history lines entered since the beginning of the current Bash session) to the history file. There is also a -w option: Write out the current history to the history file. which may suit you more depending on exactly how you use your history. If you want to make sure that they're always written immediately, you can put that command into your PROMPT_COMMAND variable: export PROMPT_COMMAND='history -a'"} +{"doc_id": 231442, "author": "JaySo", "text": "Commands are saved in memory (RAM) while your session is active. As soon as you close the shell, the commands list gets written to .bash_history before shutdown. Thus, you won't see history of current session in .bash_history."} +{"doc_id": 296981, "author": "David Winiecki", "text": "In Sierra: Use UseKeychain. (I haven't tried this but it sounds like the most correct solution.) Or ssh-add -K /your/key echo \"ssh-add -A\" | cat >> ~/.bash_profile Or create a plist file instead of appending to ~/.bash_profile. Disclaimer: as others have noted, I don't know how secure it is to store ssh passphrases in keychain."} +{"doc_id": 100377, "author": "Jem Lawton", "text": "Same issue here. I was under the impression that I was running 10.13.4 but after a restart was presented with the same Installer Log error screen. I was able to reinstall macOS High Sierra (Command \u2318 + R at boot) without needing to wipe my drive and my data and settings were retained and I was successfully updated to 10.13.4."} +{"doc_id": 329758, "author": "kenorb", "text": "Using grep is not cross-platform compatible, since -P/--perl-regexp is only available on GNU grep, not BSD grep. Here is the solution using ripgrep: $ rg -o 'foobar (\w+)' -r '$1' mtu 65536 qdisc noqueue state UNKNOWN group default link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 inet 127.0.0.1/8 scope host lo valid_lft forever preferred_lft forever inet6 ::1/128 scope host valid_lft forever preferred_lft forever [ ... ] Note the output is more terse: It does not show counts of packets handled in normal or other ways. For that, add the option -s (-stats, -statistics): $ ip -s addr 1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 inet 127.0.0.1/8 scope host lo valid_lft forever preferred_lft forever inet6 ::1/128 scope host valid_lft forever preferred_lft forever RX: bytes packets errors dropped overrun mcast 74423 703 0 0 0 0 TX: bytes packets errors dropped carrier collsns 74423 703 0 0 0 0 But what you actually want to see may be this: $ ip -stats -color -human addr 1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 inet 127.0.0.1/8 scope host lo valid_lft forever preferred_lft forever inet6 ::1/128 scope host valid_lft forever preferred_lft forever RX: bytes packets errors dropped overrun mcast 74.3k 700 0 0 0 0 TX: bytes packets errors dropped carrier collsns 74.3k 700 0 0 0 0 It shows counts with suffixes like 26.1M or 79.3k and colors some relevant terms and addresses.
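The human-readable suffixes that ip -h prints (26.1M, 79.3k and so on) are easy to reproduce. A small illustrative Python helper; the rounding may differ slightly from ip's own:

```python
def humanize(n):
    """Counter formatting in the spirit of `ip -h`, e.g. 74423 -> '74.4k'."""
    for suffix in ("", "k", "M", "G", "T"):
        if abs(n) < 1000:
            return "%.1f%s" % (n, suffix) if suffix else str(int(n))
        n /= 1000
    return "%.1fP" % n

print(humanize(74423))       # 74.4k
print(humanize(26_100_000))  # 26.1M
```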
If you feel the command is too long, use the short options: This is equivalent: ip -s -c -h a"} +{"doc_id": 133204, "author": "bmaupin", "text": "Note: many of these applications use map data from a really cool free map project called OSM (OpenStreetMap), which anyone can contribute to. OSM is constantly improving, but in many areas it may lack information like house/building numbers. Free: Google Maps Google Maps does have some offline functionality (including offline navigation as of version 9.17), but the size of the area you can download is limited (you cant download an entire US state, for instance) and some countries or areas arent available for download. See here for instructions: Download a map and use it offline HERE Maps Unless youre looking to support OSM, this is probably the best free option out there. It works offline, has maps in nearly 200 countries, and turn-by-turn navigation in over 100 countries. Features include full street addresses, points of interest, alternate routes, traffic, public transport, and speed limits. Notable features that are missing: ability to add multiple stops to a route, lane assist, rerouting based on traffic, and ability to navigate to contacts addresses. OsmAnd Open-source, uses data from OSM. Currently only has 2D navigation, but one of the better free options available. MapFactor OSM-based offline navigation. Fairly basic but promising. TomTom maps available as an in-app purchase. Navmii (formerly NavFree): North/Latin America, elsewhere Ad-supported. Allows you to download maps for offline navigation, also OSM-based. OSM has maps for the whole world, but for some reason Navmii only has maps available for certain countries, mostly in Europe and North America. Many others based on OSM here: Android - OpenStreetMap Wiki Paid: CoPilot Live Premium: USA, elsewhere CoPilot Live is a very nice, full-featured app, which will let you download maps ahead of time and use GPS offline. I ended up using it because the US version was so cheap compared to the other paid navigation apps, but Ive been very pleased with the quality, features, customer support, and free app and map upgrades. It has all the features I could want in a GPS app, the only downsides being you have to have a paid subscription for traffic data (the paid app includes a free one-year subscription), and you will have to buy new maps if you travel outside your purchased area. Other paid apps that I havent used: Garmin viago Magellan SmartGPS NDrive Scout (formerly Skobbler): USA, elsewhere Sygic TomTom There are detailed reviews of many Android GPS apps here: Android Sat Nav Apps"} +{"doc_id": 133207, "author": "Chris Stratton", "text": "Sidestepping the debate over the legitimacy of installing that app on your phone, the question of verification is one that Ive been meaning to understand for a while, and youve prompted me to try to figure out a possible way of verifying who signed an apk. Android apps are signed in the normal manner of .jar files (.apk is really just a special .jar which is just a special .zip) however it may not be trivial to trace the authenticity of the certificates unless you have something known good to compare to. Thats basically what the phone itself does - verifies that something that claims to be from the same party as something already on the phone actually is - the phone doesnt refuse to install things with unknown signers, it can only (object to/clear application data of) apparent forgeries when something new doesnt match something old that it claims to. 
You will need to have jarsigner and keytool. I believe these come from the JDK which is a prerequisite to the android SDK rather than the SDK itself. First you want try to verify the public key contained within the .apk. Usually this is in META-INF/CERTS.RSA but it can be in another file - unzip -l will tell you. You want to see what you can find out about it: unzip -p suspect.apk META-INF/CERT.RSA | keytool -printcert Thats going to dump out a lot of information about who the signer claims to be. Some certificates are apparently themselves signed by known parties, but without figuring out how to trace that, I suspect you could do something like this: unzip -p suspect.apk META-INF/CERT.RSA | keytool -printcert | grep MD5 unzip -p knowngood.apk META-INF/CERT.RSA | keytool -printcert | grep MD5 If you have a known trusted apk from the same author who used the same certificate. Im assuming that the certificates having the same MD5 sum is enough. Assuming youve decided to trust the certificate, then you can see if it has been used to sign each of the files within the .apk jarsigner -verbose -verify suspect.apk (If theres more than one .RSA file in the archive, you should add the -certs flag to tell you which certificate(s) have been used to sign each file, so you can be sure its the certificate you verified)"} +{"doc_id": 428130, "author": "Sean McMillan", "text": "There are two major trends in version control right now; Distribution, and Integration. Git is great at decentralization. SVN has lots of tools built that integrate with it. Its common to set up your SVN server so that when you check in a fix, you mention the bug number in the checkin comment, and it automatically sets that but to an in testing state, and alerts the assigned tester that they need to look at it. Then you can tag a release and get a list of all the bugs fixed in this release. The tools that do this with SVN are mature, and used every day."} +{"doc_id": 428136, "author": "Grant Palin", "text": "My reasons: maturity - both the server, and the tools (e.g. TortoiseSVN) simplicity - less steps involved, a commit is a commit. DVCS such as Git and Mercurial involve commit, then push. binary handling - SVN handles binary executables and images better than Git/Hg. Especially useful with .NET projects since I like to check build-related tools into source control. numbering - SVN has a readable commit numbering scheme, using just digits - easier to track revision numbers. Git and Hg do this quite differently."} +{"doc_id": 428146, "author": "jokoon", "text": "I post this as a response rather than a comment. I admit than DVCS are quite in the trend right now, but Ill try to tell why. DVCS are better because just like lots of people have been saying, this is the way we should have been working from the beginning. Its true, you can do with a DVCS what you used to with SVN, so in a way that makes SVN obsolete. However, not every software project is as good as it gets: Long term project have a lot benefited from DVCS, because it uses less overhead, allows much better management (branching etc), and is greatly supported by hosts like google code and github. Those projects are not the only ones, there are other kind of projects that are being developed in companies without any assistance from the outside world or internet: everything is done internally, and often on a short term. A good example: a video game. Code evolves rapidly. 
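A hedged Python sketch of the certificate-comparison idea from this keytool/jarsigner walkthrough. It hashes the raw META-INF certificate block rather than the parsed certificate, so the digest will not match keytool's MD5 output, but two APKs whose blocks hash identically carry the same signing-certificate data. The file names are the hypothetical ones from the answer:

```python
import hashlib
import zipfile

def cert_fingerprint(apk_path):
    """MD5 of the signing-certificate block inside an APK (usually
    META-INF/CERT.RSA, but the name and extension can vary)."""
    with zipfile.ZipFile(apk_path) as apk:
        blocks = [name for name in apk.namelist()
                  if name.startswith("META-INF/")
                  and name.endswith((".RSA", ".DSA", ".EC"))]
        return hashlib.md5(apk.read(blocks[0])).hexdigest()

# Same digest implies the same certificate block, hence the same signer.
print(cert_fingerprint("suspect.apk") == cert_fingerprint("knowngood.apk"))
```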
For the latter case, developers dont really need branching or sophisticated features DVCS can offer, they just want to share source code and assets. The code they make is not likely to be reused, because there is a deadline. They rely on SVN rather than a DVCS because of several reasons: Developers have machines that belong to the company, and this might change rapidly. Configuring a repo is a loss of time. If those guys dont have their own machine, they are less likely to be working on the same source code/part of the project. Plus one for the centralized data. network speeds and big money allows the use of a centralized server that deal with everything SVN, its bad practice but backups are made etc. SVN is just simpler to use, even if its the wrong way: synchronizing files on peers without redundancy is not a simple problem, and do it the right way just cannot make it in time if you always want it to be perfect. Think about the game industry machine and how SVN is a time saver; people communicates much more on those projects because games are about repetitive programming and adaptive code: there is nothing hard to code, but it has to be done the right way, ASAP. Programmers are hired, they code their part, compile it, test it a little, commit, done, game testers will deal with the rest. DVCS are made for the internet and for very complex project. SVN are for small, short term, tight team projects. You dont need to learn a lot from SVN, its almost a FTP with a dumb diff."} +{"doc_id": 32995, "author": "nkkollaw", "text": "For Open/Save dialogs, you can do CMD/shift/.(period) This should toggle hidden files visibility."} +{"doc_id": 428158, "author": "calum_b", "text": "Speed. Sometimes takes 10 or 15 minutes to clone a big git repository, while a similar sized subversion repository takes a couple of minute to check out."} +{"doc_id": 264319, "author": "Timothy Pulliam", "text": "To tell if you are in a login shell: prompt> echo $0 -bash # - is the first character. Therefore, this is a login shell. prompt> echo $0 bash # - is NOT the first character. This is NOT a login shell. In Bash, you can also use shopt login_shell: prompt> shopt login_shell login_shell off (or on in a login shell). Information can be found in man bash (search for Invocation). Here is an excerpt: A login shell is one whose first character of argument zero is a -, or one started with the --login option. You can test this yourself. Anytime you SSH, you are using a login shell. For Example: prompt> ssh user@localhost user@localhosts password: prompt> echo $0 -bash The importance of using a login shell is that any settings in /home/user/.bash_profile will get executed. Here is a little more information if you are interested (from man bash) When bash is invoked as an interactive login shell, or as a non-interactive shell with the --login option, it first reads and executes commands from the file /etc/profile, if that file exists. After reading that file, it looks for ~/.bash_profile, ~/.bash_login, and ~/.profile, in that order, and reads and executes commands from the first one that exists and is readable. The --noprofile option may be used when the shell is started to inhibit this behavior."} +{"doc_id": 362625, "author": "Angsuman Chakraborty", "text": "By default, Nmap scans only the most common 1,000 ports for each protocol(tcp,udp). If your port is outside that then it wont scan it and hence wont report it. 
However, you can specify ports you want to scan with -p option."} +{"doc_id": 34948, "author": "yihangho", "text": "The solution provided by @Konrad Rudolph is not entirely correct anymore as the GCC formula that he mentioned was moved from homebrew/dupes to homebrew/versions. You can choose which version of GCC to install. For example, at the time of writing this answer, version 4.5, 4.7 and 4.8 are available. You may check out what versions are available here. In short, you can install GCC 4.8 by using brew tap homebrew/versions brew install [flags] gcc48 You can view available install flags by using brew options gcc48"} +{"doc_id": 559247, "author": "Tobias Kienzler", "text": "While there is much potential harm possible when you use your real name, one thing you should not neglect is others using your name. And while some sites offer means to remove content that seems or seeks to harm your reputation, many dont. So my take on this is, instead of (passively) fearing for your reputation, actively make sure it is a good one e.g. by contributing well to sites such as this one. Anyone searching for your name will then hit all the positive things you actually did instead of finding mud (and possibly lies) others claim."} +{"doc_id": 2198, "author": "Kyle Cronin", "text": "If youre just looking for the Unicode versions of Mac OS X keys, you can use this Apple support document to copy and paste them: Mac keyboard shortcuts https://support.apple.com/en-us/HT201236 \u2318 Command (or Cmd) \u21e7 Shift \u2325 Option (or Alt) \u2303 Control (or Ctrl) More generally, Mac OS X provides a pane to insert special characters. Youll find it under Edit -> Emoji and Symbols in any program that takes text input. The Command key symbol can be found by searching for its name place of interest. To insert the character, double click it. If youre really hardcore and are looking for a way to type the character by entering the Unicode hex code, this is possible: Go into System Preferences -> Keyboard -> Input Sources, click +, scroll to others, select Unicode Hex Input and click Add From the input source selector in the menu bar, select Unicode Hex Input To enter a Unicode character, hold down option and type the 4-digit hex code for the character and it will be inserted. In this case, it would be option+2318."} +{"doc_id": 428184, "author": "jammycakes", "text": "There are a few things that IMO are likely to put new users off Git: The Git culture is more command-line centric. While both tools do tend to focus too much on the command line (as Ive said several times, command line instructions may be powerful and more fluent, but they are not a good marketing strategy) this is much more the case with Git. Whereas Mercurial has a de facto standard GUI tool in TortoiseHg, which is even the default download option for Windows users on the Mercurial homepage, Git has several competing GUI front ends (TortoiseGit, Git Extensions, gitk, etc) which arent well advertised on the Git website, and which are all a complete eyesore anyway. (Black text on red labels? Cmon, TortoiseGit, you can do better than that!) Theres also a much more pervasive attitude in the Git community that people who use GUI tools are not proper software developers. Git has several out-of-the-box defaults that make perfect sense to advanced users, but are likely to be surprising if not intimidating to new users. 
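To see why a port outside nmap's default top-1,000 list goes unreported, here is a bare-bones TCP connect probe in Python (scan only hosts you own; 127.0.0.1 and the port range are illustrative):

```python
import socket

def scan(host, ports):
    # Crude TCP connect scan; `nmap -p` does this far better and faster.
    open_ports = []
    for port in ports:
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            s.settimeout(0.5)
            if s.connect_ex((host, port)) == 0:  # 0 means the connect succeeded
                open_ports.append(port)
    return open_ports

# Probe high ports that a default 1,000-port scan would skip.
print(scan("127.0.0.1", range(8000, 8010)))
```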
For instance, it is much more aggressive about automating tasks such as merging (for example, git pull automatically merges and commits where possible). There is a case for fully automated merges, but most inexperienced users are terrified of merging, and need to be given the opportunity to gain confidence in their tools before unleashing their full power on their source code. Documentation and inherent complexity: $ hg help log | wc -l 64 $ git help log | wc -l 912"} +{"doc_id": 559259, "author": "user53510", "text": "In general I would recommend against (except in the relatively rare set of circumstances in which you have no legal or practical means to avoid doing so), using your real name on-line. The reasons for this have been quite well-described above, but I would also add a couple of other ones : (1.) While -- today -- in the so-called democratic countries, we do have a reasonable expectation of protection from government harassment based on our self-expressed political views, you should keep in mind that (particularly in crises), this can change very rapidly. Think, for example, of someone who advocated the overthrow of the Saudi government, prior to the 9/11 terrorist attacks. Using your real name on-line gives a suddenly-repressive government, a trivially easy way of identifying you as an enemy of the state. (2.) Children should not be allowed to use their real names on-line, under ANY circumstances. In saying this, Im not repeating the rather tired (and greatly-exaggerated) fear of cyber-perverts; rather, Im saying that it is not appropriate that every silly or immature thing that a child does or says on-line, should haunt him or her, for the rest of his or her life. The specific thing that you have to keep in mind here, is that the Internet does not have a way of telling an onlooker, how old was the person, when he or she posted this particular vulgar YouTube video. This is a paradigm shift that no previous generation has faced and we need to err on the side of caution, where childrens identities are concerned. In general -- there are a number of significant drawbacks to revealing your real identity on-line, but conversely there are very few compensating advantages. This tips the balance towards keeping your privacy by using a pseudonym."} +{"doc_id": 100509, "author": "Travis", "text": "At the installation log, I clicked on startup disk and chose Macintosh HD and restarted. The system successfully booted into 10.13.3. Now Im trying to update the OS from the App Store again."} +{"doc_id": 100513, "author": "Timothy Rascher", "text": "One thing that worked for me was holding down the Option key and selecting the original partition to boot into. Then downloading the update directly from Apple and installing it. macOS High Sierra 10.13.4 Combo Update can be downloaded here."} +{"doc_id": 100514, "author": "Robert B", "text": "I got exactly the same problem: after 8-9 minutes got the message to restart because the update didnt work. I had the same with the previous update, but didnt learn my lesson. Then I downloaded the update and with the dmg file managed to update. Mistakenly I didnt this time. So, first I restarted in safe mode, which worked, but when I restarted again back in normal mode, I got the same problem. Then I restarted with the space bar. Didnt work. Same problem. Then I restarted in recovery mode, and that finally worked. However, regretfully my most recent time machine backup was 2 weeks old (I was traveling), meaning I lost 14 days of work. 
For the past hour I have been trying to download the dmg file instead of using the update via apple store. First attempt, the download got stuck at 577KB; second attempt stuck at 4.1MB. So, now I decided to wait & see and ignore the apple store message to update, until I read some good news."} +{"doc_id": 329890, "author": "trinth", "text": "Simple no-brainer answer, which is a variation on OP's version. Sometimes, you just want something easy to type or remember: find . | xargs file | grep -i 'broken symbolic link' Or, if you need to handle NULL terminators: find . -print0 | xargs -0 file | grep -i 'broken symbolic link'"} +{"doc_id": 34981, "author": "delcoursolultions", "text": "Building on graup's answer, use Preview to export the image to a PDF, and then use Adobe Reader to print out the image in a poster format."} +{"doc_id": 559272, "author": "gowenfawr", "text": "There is no security vulnerability per se with having a + in your email address. It's permitted as per RFC 2822, and not particularly useful for SQL or other common forms of injection. However, many systems (let's call Meetup a system for this purpose) enforce security through whitelisting, not blacklisting. Someone defined a limited list of characters they expected to see in email addresses (probably upper, lower, numeric, ., _, and -) and wrote a filter to block anything outside that list. And they didn't think anyone would use +, so you're out of luck. This article describes how to set up Postfix to tag, and to use - instead of + because: However, during a recent discussion on the Postfix user list, it was mentioned that some websites (particularly banks) use JavaScript to try and validate email addresses when they are entered into online forms, and that many don\u2019t allow the plus symbol as a valid character in an email address. I switched from + to - over a decade ago, for similar reasons."} +{"doc_id": 559274, "author": "Stuart Caie", "text": "They are likely incompetent and may not even know they are lying to you. From an input-validation standpoint, they have no leg to stand on; there are a specific set of RFCs that robustly describe the format of a valid email address. Even if an email address is technically invalid according to the RFC, it may still be possible to deliver mail to it anyway. You can check an email address is deliverable by sending mail to it with a unique link for the owner to click on. As the linked site says: Sadly, many websites won't let you register an address with a plus sign in it. Not because they are trying to defeat your tracking strategy but just because they are crap. They've copied a broken regular expression from a dodgy website and they are using it to validate email addresses. And losing customers as a result. Even if we assume they have completely broken code and they are using a crap validator because they literally paste the validated email address into a UNIX shell, that still doesn't give a valid reason why joe.bloggs+meetup.com.better.not.sell.this.to.spammers@example.com should be considered invalid. There is no valid security reason to ban the use of + in email addresses. Even if spammers use it as a low-rent way of making multiple accounts, websites can easily see if someone signed up both as joe.bloggs+1@example.com and joe.bloggs+2@example.com; if Joe Bloggs is abusing their service, they can easily ban both accounts. Anyone with their own mail server can generate valid email addresses like joe.bloggs1@example.com, joe.bloggs2@example.com.
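The find | xargs file | grep pipeline above has a natural pure-Python equivalent: a link is broken when islink() is true but exists(), which follows the link, is false:

```python
import os

def broken_symlinks(root):
    broken = []
    for dirpath, dirs, files in os.walk(root):
        for name in dirs + files:
            path = os.path.join(dirpath, name)
            # islink() looks at the link itself; exists() follows it,
            # so it is false exactly when the target is missing.
            if os.path.islink(path) and not os.path.exists(path):
                broken.append(path)
    return broken

print(broken_symlinks("."))
```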
They gain no additional security by prohibiting the use of +."} +{"doc_id": 133295, "author": "Matthew Read", "text": "A good way to back up app data if youre not rooted is to used adb from the Android SDK. For example, to back up the data for Locale, youd do something like the following: adb pull /data/data/com.twofortyfouram.locale/ C:\\backup\\locale\\ And to restore, just use adb push with the same arguments in reverse order, i.e.: adb push C:\\backup\\locale\\ /data/data/com.twofortyfouram.locale/ You should be able to back up apps from /data/app/ the same way."} +{"doc_id": 493747, "author": "Demi", "text": "Use SipHash. It has many desirable properties: Fast. An optimized implementation takes around 1 cycle per byte. Secure. SipHash is a strong PRF (pseudorandom function). This means that it is indistinguishable from a random function (unless you know the 128-bit secret key). Hence: No need to worry about your hash table probes becoming linear time due to collisions. With SipHash, you know that you will get average-case performance on average, regardless of inputs. Immunity to hash-based denial of service attacks. You can use SipHash (especially the version with a 128-bit output) as a MAC (Message Authentication Code). If you receive a message and a SipHash tag, and the tag is the same as that from running SipHash with your secret key, then you know that whoever created the hash was also in possession of your secret key, and that neither the message nor the hash have been altered since."} +{"doc_id": 559285, "author": "Question Overflow", "text": "As others have pointed out, there are no real security reasons to disallow a plus sign in email address. Being a web developer, the main reason I can think of is to prevent users from making multiple registrations using different aliases, especially on an E-commerce website where you have a one-time free offer that you want to limit to each individual. From the point of view of a database administrator, an extra query on the database has to be made if you want to accept the plus symbol and at the same time limit the registration of email to one alias per email address. Otherwise setting UNIQUE key on the email address would be sufficient to prevent an INSERT if the email address is a duplicate. The above reasons are a combination of technical and commercial decisions which would not be easily explained in a line of words. If you are using an alias in your email address, an easy solution is to drop that part off in order to register."} +{"doc_id": 460982, "author": "Tone", "text": "This is an incredible opportunity for you! Lets take an entrepreneurial viewpoint on this. Assuming that management wants this project to succeed you are in a great position to help them do so. The reason this realization is so important is because you have to develop the conviction and confidence that the warning signs youre seeing are in fact going to lead to this projects failure[1]. This is your chance to practice important skills in systematic thinking and interpersonal communication. Understand and visualize the issues and potential opportunities that are being missed so you can develop a strategy to communicate these as clearly and simply as possible. Recognize your opportunity here to improve you skills. [1] Canceling the project would actually be success. 
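A sketch of the validation and deduplication this thread argues for: the whitelist includes + as RFC 2822 allows, and a site worried about multiple registrations can normalize away the +tag instead of rejecting it (the regex is deliberately simplified, not a full RFC parser):

```python
import re

# Permissive on purpose: the local part may contain '+' (RFC 2822 allows it).
EMAIL_RE = re.compile(r"^[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,}$")

def normalize(address):
    """Drop a '+tag' so joe+meetup@example.com and joe@example.com
    dedupe to the same mailbox for uniqueness checks."""
    local, _, domain = address.partition("@")
    return local.split("+", 1)[0] + "@" + domain.lower()

print(bool(EMAIL_RE.match("joe.bloggs+meetup@example.com")))  # True
print(normalize("joe.bloggs+meetup@example.com"))             # joe.bloggs@example.com
```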
Failure would be spending good money after bad."} +{"doc_id": 264376, "author": "Vivek parikh", "text": "Using AWK to count the lines in a file awk 'END {print NR}' filename"} +{"doc_id": 460989, "author": "Eran Medan", "text": "What can you do Treat this in your own self excellence terms, it's their project, but also yours, take ownership even though you know it will fail. Why? because a) you might help it fail a little less, b) in time of challenge, this is where you learn the most c) you should measure yourself via your own metrics of excellence, doing your best might not save the project, but you might end up still proud of yourself Talk to other fellow developers and see what they think, you will probably find out that many share the same frustration, if done carefully (don't make people think you want to start a mutiny or something) then this can help not just you but others as well Ignoring the problem is not going to make it go away, talking about ways how to, against all odds still pull it through, e.g. at least get some decent must haves, or the main wow factor use case done right, might make this project fail less miserably. How to do it? well, few ideas of when going gets tough tough gets going or desperate times justify desperate measures and other cliches, for example: massive use of OSS, extreme programming / agile methodologies, weekend hackathons (volunteers only, not forcing people to work weekends). This is time where you can show leadership (carefully if you are not in a team lead / senior position) but you can take this advantage and become their mockingjay, just get people to feel that they might get this project just a little less fail than everyone knows it will. You can show some leadership skills here that might help you later along the way, treat the problem as a challenge. Make sure your management knows, but very, very carefully, if they know, they'll appreciate you telling them this in a non confrontational, calm way, if they don't they will appreciate it even more, and wonder why no one told them about it before. You should tell them this as a service, without any emotional side, just plain facts, and without an agenda. If they'll ask you what you think they should do (which is a great sign, but rare) then see the next section What you can't do, but your management probably should They should not add more people to the project to help see this Call the customer immediately and tell them the bad news. Why is this a good idea? well, I'll leave quoting the manifesto for agile software development, but even without it, even waterfall lovers hate bad surprises. If the customer knows in advance this project is doomed to fail, they will be unhappy, but will be much happier that you tell them there are issues before it blows in their face, and that you are on it, and doing your best to accommodate it. The customer can do many things but most of them are not worse than finding out in the last minute they don't have a deliverable (or they do but it's of non-usable quality). The customer will appreciate it as they will be able to delay hiring testing staff, offshore IT people, change their internal training plans, and all just because you were honest with them. With the customer, think of ways to still make something out of the project, the most common one is descoping things.
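The awk one-liner above, transliterated to Python (filename is the placeholder from the answer):

```python
# Equivalent of: awk 'END {print NR}' filename
with open("filename") as f:
    print(sum(1 for _ in f))
```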
for example there might be some features that are just too hard to develop the way they were designed, and if the customer will agree for some small modifications and simplifications, then some features will become simpler. Update their own higher management, it will help them keep their job as well... What you should NOT do Ask to add more people to the project (see this) Quit / look for another job (at least not yet), this is something you can learn from, and what will make you one day a better developer or better manager. Youll learn to appreciate many things better, better time manage, design better, write code better, and work with peers and management better. Look for a job after these 2 months are over if you dont like working there, not because of any other reason. Whine, complain, or be negative about the project, management, the bad design you inherited, the newbie developers you got to mentor that it takes you 3 hours to explain them to do something that takes you 1 hour, be positive as much as you can. Criticize management and become a target as a trouble maker, they are in the same boat, and they might know things that you dont, all you can do is update them (always update your own direct manager, never bypass her / him) Blame people, (or yourself), it doesnt help, never Take it too seriously, unless its medical equipment, no one is likely to die if you miss the deadlines, its software, we miss deadlines for a living, relax. Thats just my two cents, YMMV"} +{"doc_id": 198845, "author": "jlliagre", "text": "/dev/console is a virtual set of devices which can be set as a parameter at boot time. It might be redirected to a serial device or a virtual console and by default points to /dev/tty0. When multiple console= options are passed to the kernel, the console output will go to more than one device; /dev/tty[0-N] (N is the highest attributed TTY number, e.g. 63) is one of the virtual consoles you switch to with control-alt-F1 and so on; /dev/tty0 is also by default virtual console; /dev/tty is kind of an alias to the console (physical, virtual or pseudo device, if any) associated to the process that open it. Unlike the other devices, you do not need root privileges to write to it. Note also that processes like the ones launched by cron and similar batch processes have no usable /dev/tty, as they arent associated with any. These processes have a ? in the TTY column of ps -ef output."} +{"doc_id": 198847, "author": "dchirikov", "text": "From the Linux Kernel documentation on Kernel.org: /dev/tty Current TTY device /dev/console System console /dev/tty0 Current virtual console In the good old days /dev/console was System Administrator console. And TTYs were users serial devices attached to a server. Now /dev/console and /dev/tty0 represent current display and usually are the same. You can override it for example by adding console=ttyS0 to grub.conf. After that your /dev/tty0 is a monitor and /dev/console is /dev/ttyS0. An exercise to show the difference between /dev/tty and /dev/tty0: Switch to the 2nd console by pressing Ctrl+Alt+F2. Login as root. Type sleep 5; echo tty0 > /dev/tty0. Press Enter and switch to the 3rd console by pressing Alt+F3. Now switch back to the 2nd console by pressing Alt+F2. Type sleep 5; echo tty > /dev/tty, press Enter and switch to the 3rd console. 
You can see that tty is the console where the process started, and tty0 is always the current console."} +{"doc_id": 460998, "author": "Sjuul Janssen", "text": "Find the problems your project is running into and try to quantify them as objectively as possible. With every metric you quantify, make sure you define why you believe this metric is important. You want to give your manager insight into what the consequences would be if a certain metric were not within an acceptable range. You will need to give some guidelines for each metric to indicate which values are Good, Acceptable, Problematic or Bad. Define every criterion up front. If possible you could describe what would be minimally needed for a project to succeed and contrast this with the current project. Static code quality can be quantified by lots of static analysis tools. You can keep this as simple or detailed as you see fit. The metrics I suggest you start with: Cyclomatic complexity. Size of your code (e.g. number of lines per function/class, number of files, number of tables...); identify which modules are too large. Duplication. Adherence to code style. Defect rate per KLOC by module and/or subsystem (identify troublesome parts of your system); you could calculate how much per team member, but I think you should keep that for yourself. Solved vs. found bugs. Time needed to solve a bug (perhaps, if this is trending upward, make a graph of it); perhaps make an estimate of how much time is needed if this keeps going at the current rate. Planning: Extrapolate the time used for the features built. Take into account the complexity of the feature. This doesn't have to be very precise. What you want to convey with this would be along the lines of 'Features A, B and C are around the same complexity as D, E and F. With features ABC we used 170% of the planned time. If nothing changes we expect the same time is needed for DEF', or along the lines of 'The average feature is taking X% more time than calculated. We have no reason to assume that the rest of the functionality is easier to build, so we should compensate for this in the future planning.' It is of no use to have a plan if it is not realistic. Try to have some monthly or preferably even shorter release schedule, if only internal. This could help you further with extrapolating the time you need for the project. This could also save you from making unrealistic commitments (if you don't commit to new work before a release is done). Make a plan for each cycle and make sure new features only enter in the next cycle (i.e. they can never be added to the current cycle). Test coverage: explain the normal test coverage values and show what your current coverage is. Documentation: How much (in %) is actually documented? How good is it? Modularity: Class based (coupling and cohesion), package based, subsystem based (how many communication paths are there?)"} +{"doc_id": 133318, "author": "LifeH2O", "text": "Hey! There is an app for that: Android Usb Port Forwarding http://www.codeproject.com/kb/android/usbportforwarding.aspx I am a bit confused about how to use it; please let me know if you get it working perfectly."} +{"doc_id": 100554, "author": "Florian Dierickx", "text": "You can use Easy New File Creator, a free app for macOS. Using Easy New File Creator, a Finder extension, you can add 'create new file' functionality to the Finder context menu. You can customise the file name and extension for the file to be created."} +{"doc_id": 329941, "author": "Chad", "text": "I was having the exact same problem with PuTTY connecting to an Ubuntu 16.04 machine.
It was puzzling because PuTTYs pscp program was working fine with the same key (and the same key worked in PuTTY to connect to another host). Thanks to the valuable comment from @UtahJarhead, I checked my /var/log/auth.log file and found the following: sshd[17278]: userauth_pubkey: key type ssh-dss not in PubkeyAcceptedKeyTypes [preauth] It turns out that newer versions of OpenSSH dont accept DSA keys by default. Once I switched from a DSA to an RSA key, it worked fine. Another approach: this question discusses how to configure the SSH server to accept DSA keys: https://superuser.com/questions/1016989/ssh-dsa-keys-no-longer-work-for-password-less-authentication?lq=1"} +{"doc_id": 133340, "author": "eldarerathis", "text": "It has to do with whether or not youve currently got a good connection to Googles servers for sync services and the like. From page 27 of their Android 2.3 Users Guide: Network status icons turn green if you have a Google Account added to your phone and the phone is connected to Google services, for syncing your Gmail, Calendar events, contacts, for backing up your settings, and so on. If you don\u2019t have a Google Account or if, for example, you\u2019re connected to a Wi-Fi network that is not connected to the Internet, the network icons are white. Im not really sure why it bounces back and forth between green and white sometimes. Ive noticed it on my phone but everything works fine (I run CyanogenMod, though, so perhaps it has to do with that). It could also be that green means its currently in the act of syncing, but the wording in the document is a little vague, in my opinion. In the case of Honeycomb or Ice Cream Sandwich, the colors are blue/grey instead of green/white, but they still have the same meaning. Edit: For the interested, here are links to the user guide in other languages. The above is the English version."} +{"doc_id": 297182, "author": "Pablo A", "text": "Everyone here talks about the great dmidecode command and the -t parameter, but with sudo lshw -short you also get easily the product name and model: $ sudo lshw -short H/W path Device Class Description ==================================================== system UX303UB (ASUS-NotebookSKU) /0 bus UX303UB Other great commands for getting hardware info: inxi [-F] All-in-one and friendly, written in Perl. Try inxi -SMG -! 31 -y 80 lscpu # Better than /proc/cpuinfo lsusb [-v] lsblk [-a] # Better than df -h. Block Device Information. sudo hdparm /dev/sda1"} +{"doc_id": 231654, "author": "dajon", "text": "Can use a standard string comparison to compare the chronological ordering [of strings in a year, month, day format]. date_a=2013-07-18 date_b=2013-07-15 if [[ $date_a > $date_b ]] ; then echo break fi Thankfully, when [strings that use the YYYY-MM-DD format] are sorted* in alphabetical order, they are also sorted* in chronological order. (* - sorted or compared) Nothing fancy needed in this case. yay!"} +{"doc_id": 133354, "author": "eldarerathis", "text": "CyanogenMod 7 supports this. It activated by going to Settings->CyanogenMod Settings->Applications and checking Permission management as of the most recent build. You can then allow and disallow permissions by choosing an app from the app management list (Settings->Applications->Manage applications). There is an article on endgaget with a Youtube demonstration. Disclaimer: This may be obvious to some, but denying permissions to an app could have fairly crash-tastic consequences. 
Nonetheless, if you have a device that is supported by CM and you are willing to root and install it, you can enjoy permission-by-permission control (and any hazards that come with it). In fact, due to the crashes that the permission management implementation tended to cause, it was removed from the CyanogenMod codebase in version 9. However, the most recent nightly builds of CyanogenMod 10.1 now include a feature that has been dubbed Privacy Guard. Instead of blocking apps from accessing data that they request, Privacy Guard will provide them with blank data. As an example, if an app running under Privacy Guard requests your contacts list, CM will simply return an empty list, causing the app to functionally believe that you dont have any contacts stored on your phone."} +{"doc_id": 592107, "author": "Arminius", "text": "If displaying the wrong URL in the tooltip requires Javascript, how did tech-supportcenter get their Javascript onto the Google search results page? The scammers did not manage to inject JS into the search results. That would be a cross-site scripting attack with much different security implications than misleading advertisement. Rather, the displayed target URL of a Google ad is not reliable and may conceal the actual destination as well as a chain of cross-domain redirects. The scammers possibly compromised a third-party advertiser and hijacked their redirects to lead you to the scam site. Masking link targets is a deliberate feature of Google AdWords. It is generally possible to specify a custom display URL for an ad link which can be different from the effective final URL. The idea is to enable redirects through trackers and proxy domains while keeping short and descriptive links. Hovering over an ad will only reveal the display URL in the status bar, not the real destination. Here is an example: Im searching for shoes. The first ad link displays www.zappos.com/Shoes: When I click on it, I actually get redirected multiple times: https://www.googleadservices.com/pagead/aclk?sa=L&ai=DChXXXXXXXd-6bXXXXXXXXXXXXkZw&ohost=www.google.com&cid=CAASXXXXXp8Yf-eNaDOrQ&sig=AOD64_3yXXXXXXXXXXXXXYX_t_11UYIw&q=&ved=0aXXXXXXHd-6bUXXXXXXXXXwIJA&adurl= -- 302 --> http://pixel.everesttech.net/3374/c?ev_sid=3&ev_ln=shoes&ev_lx=kwd-12666661&ev_crx=79908336500&ev_mt=e&ev_n=g&ev_ltx=&ev_pl=&ev_pos=1t1&ev_dvc=c&ev_dvm=&ev_phy=1026481&ev_loc=&ev_cx=333037340&ev_ax=23140824620&url=http://www.zappos.com/shoes?utm_source=google%26utm_medium=sem_g%26utm_campaign=333037340%26utm_term=kwd-12666661%26utm_content=79908336500%26zap_placement=1t1&gclid=CI3vqXXXXXXXXXXXXXBBA -- 302 --> http://www.zappos.com/shoes?gclid=CI3vXXXXXXXXXXXXXMBBA&utm_source=google&utm_medium=sem_g&utm_campaign=333037340&utm_term=kwd-12666661&utm_content=79908336500&zap_placement=1t1 Obviously, Google has strict destination requirements for ad links in place and an ordinary customer wont get their ad approved if they set the link target to a completely different domain. But scammers do occasionally find ways around the vetting process. At least, Googles policy about destination mismatches is pretty clear: The following is not allowed: Ads that dont accurately reflect where the user is being directed [...] Redirects from the final URL that take the user to a different domain [...] Trusted third-party advertisers may be permitted to issue cross-domain redirects, though. Some of the exceptions are listed here, e.g.: An example of an allowed redirect is a company, such as an AdWords Authorized Reseller, using proxy pages. [...] 
For example: Original website: example.com Proxy website: example.proxydomain.com We allow the company to use example.proxydomain.com as the final URL, but retain example.com as the display URL. One major weak spot is that Google doesnt control the third-party redirectors (in above example, thats pixel.everesttech.net). After Google has vetted and approved their ads, they could simply start redirecting to a different domain without immediately getting noticed by Google. Its possible that, in your case, attackers managed to compromise one of these third-party services and pointed their redirects to the scam site. In recent months, there have been several press reports about an almost identical scam pattern, e.g. this report about a fraudulent Amazon ad whose display URL spells out amazon.com but redirects to a similar tech support scam. (By now, your discovery has also been picked up by a few news sites, including BleepingComputer.)"} +{"doc_id": 559342, "author": "user53510", "text": "I would tend to agree that this is primarily a compliance-driven requirement with at best a marginal net increase in security (at, unfortunately, a substantial cost in loss of operational availability, due to otherwise legitimate users being locked out after 90 days, machine-to-machine communications failing because their passwords expired and nobody updated them, calls to the Help Desk to resolve password reset problems and so on). That having been said, there are valid reasons for enforcing such a policy (although -- these justifications are greatly lessened by the relatively lengthy validity period for a particular password... after all if a cyber-crook gets your password for 90 days, there is a lot of damage that he or she can do). The biggest advantage comes in the following scenario : You get hacked or otherwise compromised, and the cyber-crook finds out your username and password. It happens to be near the change threshold time period (typically -- the end of the quarter, and dont think cyber-crooks dont know that). You are required to change your old password (which both you and the cyber-crook, know). You follow Company policy and change your password, meaning that now the cyber-crook is locked out again. He or she can try to use the same methods as before, to get unauthorized access to this credential as well...but doing so may be annoying and time-consuming. The point here is, changing ones password to something new, is not something that a cyber-criminal will normally do, because from his or her point of view (unless, of course, the password hijack is really a kind of Denial-of-Service attack), changing the password and locking the legitimate (original) owner out of the account, will immediately alert the legitimate user that something untoward, is going on. This is on top of the fact that cyber-criminals usually hijack thousands of passwords at a time; changing all of these, particularly because they may not have access to the back-end systems set up to allow legitimate users to do this, can be an onerous task. None of the above is written ignoring the fact that cyber-crooks will usually set up their own, privileged account, the moment that they get unauthorized access to your system, or is meant to ignore the other potential weaknesses in the mandatory 90-day password change paradigm that is so prevalent these days. Think of this rule as one (minor) element in your layered defense strategy, and youll see that it has a place... 
but its certainly not something that you should rely on, to keep the bad guys out."} +{"doc_id": 198895, "author": "Freiheit", "text": "Yes. From the manpage: -k, --insecure (TLS) By default, every SSL connection curl makes is verified to be secure. This option allows curl to proceed and operate even for server connections otherwise considered insecure. The server connection is verified by making sure the servers certificate contains the right name and verifies successfully using the cert store. See this online resource for further details: https://curl.haxx.se/docs/sslcerts.html See also --proxy-insecure and --cacert. The reference mentioned in that manpage entry describes some of the specific behaviors of -k . These behaviors can be observed with curl requests to test pages from BadSSL.com curl -X GET https://wrong.host.badssl.com/ curl: (51) SSL: no alternative certificate subject name matches target host name wrong.host.badssl.com curl -k -X GET https://wrong.host.badssl.com/ ..returns HTML content..."} +{"doc_id": 592117, "author": "Harper - Reinstate Monica", "text": "This is a common abuse in paid advertising (note the Ad icon at the tail of your left arrow). Advertisers want to track people who click on Google ads, partly to independently confirm Googles click billing, and partly to give away free cookies. So they request search engines to send users to a ClickURL which does that, and then forwards to the proper destination. The ClickURL may be off site, for instance at the ad agency. The advertiser wants to provide a separate DisplayURL, which is simply the URL shown in the text ad. To hide the ugly ad agency URL, and to show a neatly displayed URL, instead of the actual destination URL (which may be lengthy e.g. a specific product page). This DisplayURL is being abused by the phishers. The search engine is never provided the destination URL (where the ClickURL should forward to). Since the ClickURL is often on a different domain than the DisplayURL, this is hard to police. Target may retain several SEOs, each using a different Gooogle ID or ad agency, so theres nothing weird about a random Google ID running ads with a target.com DisplayURL all of a sudden. Fairly likely that the advertiser is a small business and got phished: i.e. the spammer got ahold of their Google user credentials, discovered a Google Ad account with stored credit card data, and is running ads on their dime."} +{"doc_id": 166136, "author": "Irfan Latif", "text": "Aurora Store is an open source fork of Yalp Store which provides a Material UI to the later. Aurora Store is an UnOfficial FOSS client to Googles Play Store, with an elegant design, using Aurora you can download apps, update existing apps, search for apps, get details about in-app trackers and much more. You can also Spoof your Device Information, Language and Region to get access to the apps that are not yet available or restricted in your Country | Device. Aurora Store does not require Googles Proprietary Framework to operate, it works perfectly fine with or without GooglePlayService or MicroG. Thereby avoiding the various privacy issues."} +{"doc_id": 100605, "author": "John", "text": "I would use this command. 
shasum -a 256 -c <<< '<hash to compare> *<path to file>' Example: shasum -a 256 -c <<< '0d2ea6de4f2cbd960abb6a6e020bf6637423c07242512596691960fcfae67206 */Users/USERNAME/Downloads/someprogram.dmg'"} +{"doc_id": 133376, "author": "BobFlemming", "text": "It's been tested by a group here: http://arstechnica.com/gadgets/news/2011/01/researchers-enable-mesh-wifi-networking-for-android-smartphones.ars So it is possible."} +{"doc_id": 166146, "author": "gdeff", "text": "The Samsung SoundAssistant is your huckleberry. Free app. Provides system-wide fine control over volume, with adjustable increments and per-application settings."} +{"doc_id": 428332, "author": "Jon Purdy", "text": "It is absolutely not normal for a group that size to be working without source control\u2014the size of the largest group of programmers that can work effectively without source control is less than or equal to one. It\u2019s absolutely inexcusable to work without version control for a professional team of any size, and perhaps I\u2019m not feeling creative, but I can\u2019t come up with any reason why you would want to forgo it. Version control is just another tool\u2014a particularly powerful one, and one which delivers enormous benefits relative to its minimal cost. It gives you the power to finely manage all of your changes in an organised fashion, with all kinds of other handy things like branching, automated merging, tagging, and so on. If you need to build a version from umpteen versions ago, you can check out the code from that point in time and just build without having to jump through any other hoops. More importantly, if you need to write a bugfix, you can merge it into an update without having to deliver the new features you\u2019re working on\u2014because they\u2019re on another branch, and as far as the rest of the development needs to be concerned, they don\u2019t exist yet. You\u2019re experiencing resistance because you\u2019re challenging the culture of the company. It will take time for them to adjust, no matter what you say. The best you can do is keep pushing for it, and if the company really won\u2019t budge, find another job that\u2019s better suited to your level as a developer."} +{"doc_id": 428336, "author": "V4Vendetta", "text": "It is by no means a normal scenario, and I think you should put up a tough fight to get this set up in your company. It has far-reaching benefits, and there is no point in realizing that only when you approach doomsday; it isn't a situation that falls under 'if it ain't broken, don't fix it'. Any reason for not implementing it can only be an excuse for bad work or a blunder waiting to happen. Just tell them how great it is to find what the app looked like on 1 Jan this year; or 'hey, this functionality was added in March, I think we need to expand a bit more on this'; or 'wow, this bug 154256 has been fixed in this commit'; or 'I can branch out the app and send out the deployment, no problem guys, you can carry on working'. This can go on and on... (remember to add comments on commits, or else that would come up as another question later)"} +{"doc_id": 428337, "author": "Treb", "text": "Is it normal for a group of this size not to have source control? In my experience, it is not the norm, but not as completely unusual as other answers here suggest. The majority of small companies do use source control, but a significant number doesn't, unfortunately.
I have so far been given only vague reasons for not having source control - what reasons would you suggest could be valid for not implementing source control, given the information above? See this question on SO for a good discussion. The accepted answer says: There are no good reasons not to use version control. Not one. Are there any more reasons for source control that I could add to my arsenal? The consensus among all developers and project managers I have met, and of course here on Programmers and SO is that source control is a must. Its an accepted best practice. If somebody decides to ignore it, he needs to have some very strong and convincing arguments why this is the right decision for him (i.e. a little more than we never needed it, so why should we need it in the future). The arguments you have presented so far are focused on specific issues, perhaps you should try a wider approach along the lines of everybody does it, so why dont we as well?"} +{"doc_id": 428339, "author": "Geerten", "text": "Is it normal for a group of this size not to have source control? No definitely not. When I started at my current company, there was one person whom you should send your changed code to, and he would merge it. I could convince everybody within days to use SVN. I have so far been given only vague reasons for not having source control - what reasons would you suggest could be valid for not implementing source control, given the information above? I think the reason you only heard vague reasons is because there are no valid reasons for not using version control. Are there any more reasons for source control that I could add to my arsenal? Yes, there are a lot of reasons. Branching gives you the possibility to develop new functionality without interfering with other developments. Each commit gives you the information about what exactly has been changed, who did that change, and when that change was made. You can easily commit bugfixes, and deploy them to the customers, and leave out the unfinished new functionality. You can maintain different versions, if customers are afraid of going to a newer version. You can work on the same project (even the same source files!) with ease. You can easily revert a mistake, with preserving the changes after that committed mistake."} +{"doc_id": 428340, "author": "Benjol", "text": "It may not be normal, but as Treb says, its probably not that unusual As others have said, there are no valid reasons for not having source control in a company your size. So you need to identify and attack the invalid reasons: a) the main one is the status quo: if it aint broke, dont fix it. This is difficult: you could start pointing out all the things which arent working as well as they should (which can quickly get you labelled as a negative person), or you just wait for something to blow up (which might never happen), or you could emphasise all the things that could go wrong. Unfortunately people in charge of small companies are relatively impervious to things which could go wrong until they actually do go wrong... b) ignorance/fear/uncertainty: we dont really understand what source control is; we dont know how to use it; we dont know which tool to choose; its going to cramp our style. This is one reason I definitely wouldnt send them here! It might be a fairly tall order for you on your own, but I think to maximise your chances you need to present a turn-key solution, and not too many variants or alternatives. 
You need a clear idea of: how you want to fit/transform your working process to work with the given tool; how/if you plan to back-port existing code; how fast you think you can train users and have them up and running; how you can manage the transition period (if there is one); how much it is likely to cost (in hours, if not in dollars). c) there may be other reasons (previous bad experiences with VSS for example, or having read horror stories about allegedly over-complicated tools), but youll have to bat those ones individually when you discover them. There are ample reasons for source control outlined in the other answers. My advice would be to pick out the main 2 or 3 that could really make a difference to your company and polish them up and present them in a meeting to as many of your colleagues as possible. Try to provoke discussion: even if you dont immediately convince them, you will gain insight into what the sticking points may be. (Have you read/heard about The Change Function?)"} +{"doc_id": 133429, "author": "Johhny", "text": "WIFI Web Login works great for those that require a username/password or an email, or even just a checkbox to agree the EULA."} +{"doc_id": 133432, "author": "eldarerathis", "text": "Yes, there are a couple of options for this, including: SSHDroid (does not require rooted phone) QuickSSHD (see this Google thread for some discussion) Dropbear (requires rooted phone, see this Droidforums thread for some discussion) SSHelper (does not require rooting; free software -- under GPL; it incorporates also code from other projects; might eventually appear in the F-Droid repository) some other free (= libre) software projects of an ssh server have been mentioned in the discussion of their potential inclusion into F-Droid Some of these apps will require you to root your device. SSHDroid, QuickSSHD and SSHelper, at least, do not (there may be others as well). There are certainly multiple choices for you to investigate depending on your requirements/desired features/etc."} +{"doc_id": 133436, "author": "TheyGowByrd", "text": "Try Sound Manager. Schedule volumes of types of sounds with it (6 categories). Switch off notification (Battery full, e-mail notification etc.) volume from 12 to 7 while leaving untouched ring and media volumes and youre all set. Works on my brandnew Samsung Galaxy s2. Slept well last night without a disturbance! I used to swich to buzz only before going to sleep and to remember to switch it back to ring in the morning. Sleep well!"} +{"doc_id": 35136, "author": "Richard J. Ross III", "text": "This is an aggregation of the answers posted on SOs deleted clone, Graphical diff for Mac OS X. It includes links to each product, and the current price since last edit in USD. Note that any links to SO will only be visible to users who can view deleted content, which requires either moderator privileges or 10k reputation on that site. Sourcegears DiffMerge, shareware Joachim Eibls KDiff3, free Black Pixels Kaleidoscope, $69.99 Bare Bones Softwares TextWrangler, free Araxiss Merge, $129 + $29/year Deltopias DeltaWalker, $39.95/$75.95 Kai Willadsens Meld, free Weipin Xias DiffFork, $26 Biscades RoaringDiff, free tkdiff, free There were some other suggestions, which were not diff tools in their own right, which I will list below: It (Beyond Compare) runs well in Wine, but I miss the shell integration. - Danyal Aytekin, May 11 12, 13:01 Eclipse also has a fairly decent comparison mechanism. 
- JeeBee, Oct 9 08, 12:45 P4Merge that comes with Perforce is pretty good and comes for free with the Perforce client. Both terminal and GUI versions. - amok, Jul 7 10, 23:19 I had to use Vim because most of the diffs mentioned here do not understand UTF-8. - user184880, Oct 6 09, 9:56 That about wraps up that thread's merge into this one, as covered here."} +{"doc_id": 231746, "author": "michas", "text": "VirtualBox will create a private network (10.0.2.x) which will be connected to your host network using NAT. (Unless configured otherwise.) This means that you cannot directly access any host of the private network from the host network. To do so, you need some port forwarding. In the network preferences of your VM you can, for example, configure VirtualBox to open port 22 on 127.0.1.1 (a loopback address of your host) and forward any traffic to port 22 of 10.0.2.1 (the internal address of your VM). This way, you can point PuTTY to port 22 of 127.0.1.1 and VirtualBox will redirect this connection to your VM, where its ssh daemon will answer it, allowing you to log in."} +{"doc_id": 297284, "author": "Simon Rig\u00e9t", "text": "us - Time spent in user space sy - Time spent in kernel space ni - Time spent running niced user processes (user-defined priority) id - Time spent in idle operations wa - Time spent waiting on IO peripherals (e.g. disk) hi - Time spent handling hardware interrupt routines. (Whenever a peripheral unit wants attention from the CPU, it literally pulls a line to signal the CPU to service it) si - Time spent handling software interrupt routines. (a piece of code calls an interrupt routine...) st - Time spent on involuntary waits by the virtual CPU while the hypervisor is servicing another processor (stolen from a virtual machine)"} +{"doc_id": 35143, "author": "nohillside", "text": "The idea behind splitting is that it allows you to keep a certain part of the shell buffer displayed while continuing to enter new commands. So only the lowest split does allow keyboard input. To position the view on the shell buffer, use the scroll bar. You can un-split by pressing Shift-Cmd-D."} +{"doc_id": 35145, "author": "bmike", "text": "Yes - all computer batteries from Apple (including the newer unibody models where the battery is not consumer replaceable) are easily swapped in a 10 to 35 minute procedure to open, inspect, replace, test and document the repair. This assumes the technician has all the parts, adhesives and solvents in place and has done a dozen or so of this exact model, so they only have to refresh the steps and not have to carefully study the manual and find each screw and piece to remove. Of course, your wait time might be longer if no one is free to start work immediately or the part needs to be retrieved from a nearby stock room. If the part isn't in stock, Apple should be able to quote you a delivery time to order the part, and discuss if you want to leave the Mac for service, choose mail-in service, or leave with your Mac and return later to get the repair done as same- or next-day service once the part and the Mac are in the shop together. Since you called AppleCare and explained your symptoms, my guess is they cannot run the diagnostics remotely to establish a true failure (or if you agree to pay for the repair) and pre-order the part before you present the Mac for repair in the store.
My experience is each store has sufficient stock to do several repairs of a battery type, but that being said, I've also gone in on a busy day where they had three machines needing the same part as I did, and I was the third in line and therefore had the option of leaving the Mac or waiting for the part to arrive. Worst case, you get a diagnosis and don't leave the machine in for repair, but have options to pre-order the part when you return or find another store. If the store mails in your Mac for repair, often it can get overnighted to a return location of your choosing, and clearly, wherever they ship the machine will be same-day service for an in-stock battery at the main repair depot."} +{"doc_id": 592206, "author": "Jen Brannstrom", "text": "One aspect of this answer provided by Arminius is that it had to be an agency trusted by Target at some point. Because when you bid on brand names in AdWords it always gets flagged for copyright reasons. Unless your AdWords account has been whitelisted. This can be a CSV list of accounts that a brand name / copyright holder permits to place ads on Google on their behalf. See the form here. So apart from the technical reasons explained in other answers here, it is almost impossible to have done this without access to that brand name inside your AdWords account. And that can only have come from a whitelisted ad agency that Target, at some point, had trusted their AdWords management with. Or an outsourced agency on their behalf that was overlooked. If there were any so-called exploit for this issue, then it is this sort of social engineering, i.e. getting on that whitelist as an accredited AdWords agency on behalf of a brand. As background info: A few years ago it was common for us to be offered the opportunity to buy AdWords accounts from the newly established ad agencies in China. Chinese agencies had been given access to AdWords, and seemingly in a state of euphoria Google was allowing them unlimited account creation. Accounts that were abusing the AdWords TOS, and they ostensibly never got blacklisted. On the other side, big brands were outsourcing their AdWords account management to these Chinese agencies because their management rates were simply too good. That's definitely one possible scenario of how you could get access to such a well-known brand."} +{"doc_id": 362838, "author": "Mohammed Omer", "text": "I have one shorthand command: cp */* . This will copy all subfolders' content one level up. Of course you can use move: mv */* . Or assign a new destination: cp */* /destination"} +{"doc_id": 100705, "author": "Nimesh Neema", "text": "Executing the brew list command shows a simple, alphabetically sorted list of all the installed packages. However, various required packages (dependencies) get automatically installed when installing a package using Homebrew. It is possible to view the list of all the installed packages as a nicely formatted dependency tree. To view it, execute the following command: brew deps --tree --installed An example output is as shown below: gdbm openssl python ├── gdbm ├── openssl ├── readline ├── sqlite │ └── readline └── xz readline sqlite └── readline xz The independently listed packages (e.g. gdbm and openssl in the example output above) have no dependencies. The packages depicted as part of a tree structure have their dependencies listed at the immediately lower level (e.g. package sqlite requires that the package readline be installed).
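A similar tree can also be rebuilt programmatically. A rough Python sketch, assuming `brew deps --installed` emits lines of the form `name: dep1 dep2 ...` (verify the output format on your machine):

```python
import subprocess

# Parse "formula: dep1 dep2 ..." lines into a dict of direct dependencies.
out = subprocess.run(["brew", "deps", "--installed"],
                     capture_output=True, text=True, check=True).stdout
deps = {}
for line in out.splitlines():
    name, _, rest = line.partition(":")
    deps[name.strip()] = rest.split()

# Print each formula with its direct dependencies indented below it.
for name in sorted(deps):
    print(name)
    for d in deps[name]:
        print(f"└── {d}")
```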
The packages listed at leaf nodes in the tree structures have no dependencies. Dependencies visualised in a tree structure can help in easily getting rid of unnecessary packages."} +{"doc_id": 100707, "author": "Nimesh Neema", "text": "You can use KeepingYouAwake. It is a free and lightweight menu bar utility for macOS (version 10.10 and newer) that can prevent your Mac from entering sleep mode for a predefined duration or as long as it is activated. It can be easily installed using Homebrew cask by running: brew cask install keepingyouawake It is an alternative to the widely popular tool Caffeine from Lighthead Software. Caffeine hasn't been updated in a while. KeepingYouAwake's icons are Retina-compatible and do look nice in the dark-themed menu bar. Once you have activated KeepingYouAwake, you can simply lock the screen (by pressing Control + Command + q), turn down the screen brightness and remain assured that the system won't go to sleep!"} +{"doc_id": 199015, "author": "abhixec", "text": "No, vi doesn't have any significant advantage over vim; rather, it's the other way around. Vim has more advantages than vi. You may be interested in: Why, oh WHY, do those #?@! nutheads use vi? Edit: also read: Is learning VIM worth the effort?"} +{"doc_id": 199017, "author": "dchirikov", "text": "The advantage is that vi is usually preinstalled on enterprise UNIX systems like AIX or Solaris. Besides, vim is not accessible on the installation media."} +{"doc_id": 100724, "author": "fsb", "text": "To open multiple windows, simply go to File -> New -> Open New Main Window. This will allow you to open your calendar and your email in separate windows so you can see both at the same time."} +{"doc_id": 428404, "author": "David Schwartz", "text": "The English words have overlapping meanings but slightly different implications. Property implies something possessed by something else. Attribute suggests something that is an inherent characteristic. However, because their meanings almost completely overlap, you can substitute one for the other. The exception would be where the terms are jargon, for example in C# where property and attribute have different, much more specific, meanings. In HTML, there is something specifically called a tag attribute and calling it a property would be confusing. If you're inventing new jargon for a specific context, I would say to prefer whichever sounds more natural. If it's something discrete that something else contains, I'd generally prefer property. If it's something that is an inseparable inherent characteristic of something else (and especially if it's something other things can have as well), I'd generally prefer attribute."} +{"doc_id": 264583, "author": "kenorb", "text": "It's not deprecated, but backticks (`...`) are the legacy syntax, required only by the very oldest of non-POSIX-compatible Bourne shells, and $(...) is POSIX and preferred for several reasons: Backslashes (\) inside backticks are handled in a non-obvious manner: $ echo `echo \\a` $(echo \\a) a \a $ echo `echo \\\\a` $(echo \\\\a) \a \\a # Note that this is true for *single quotes* too! $ foo=`echo \\`; bar=$(echo \\); echo foo is $foo, bar is $bar foo is \, bar is \\ Nested quoting inside $() is far more convenient: echo x is $(sed ... <<<$y) instead of: echo x is `sed ... <<<\$y\` or writing something like: IPs_inna_string=`awk /\`cat /etc/myname\`/{print $1} /etc/hosts` because backticks use an entirely different context for quoting, which is not portable, as Bourne and Korn shells would require these backslashes while Bash and dash don't. Syntax for nesting command substitutions is easier: x=$(grep $(dirname $path) file) than: x=`grep \`dirname \$path\\` file` because $() enforces an entirely new context for quoting, so each command substitution is protected and can be treated on its own without special concern over quoting and escaping. When using backticks, it gets uglier and uglier after two or more levels. A few more examples: echo `echo `ls`` # INCORRECT echo `echo \`ls\`` # CORRECT echo $(echo $(ls)) # CORRECT It solves a problem of inconsistent behavior when using backquotes: echo \$x outputs \$x echo `echo \$x` outputs $x echo $(echo \$x) outputs \$x The backticks syntax has historical restrictions on the contents of the embedded command and cannot handle some valid scripts that include backquotes, while the newer $() form can process any kind of valid embedded script. For example, these otherwise valid embedded scripts do not work in the backtick form but do work in the $() form (IEEE): a here-document or a comment whose body contains the closing delimiter character, e.g. echo $( cat <<\eof ... a here-doc with ) ... eof ) and echo $( echo abc # a comment with ) ) both parse correctly, while the backtick equivalents, echo ` cat <<\eof ... a here-doc with ` ... eof ` and echo ` echo abc # a comment with ` `, fail. Therefore the $-prefixed syntax for command substitution should be the preferred method, because it is visually clear with clean syntax (improves human and machine readability), it is nestable and intuitive, its inner parsing is separate, and it is also more consistent (with all other expansions that are parsed from within double quotes), where backticks are the only exception, and the ` character is easily camouflaged when adjacent to a quote character, making it even more difficult to read, especially with small or unusual fonts. Source: Why is $(...) preferred over `...` (backticks)? at BashFAQ See also: POSIX standard section 2.6.3 Command Substitution POSIX rationale for including the $() syntax Command Substitution bash-hackers: command substitution"} +{"doc_id": 133519, "author": "Alastair", "text": "You should be able to register multiple Kindle apps or devices to the one Amazon account. For example, I can read the same book on my Kindle, on an HTC Desire using the Kindle app, and in the desktop application on work and home computers."} +{"doc_id": 133522, "author": "Bryan Denny", "text": "As per their FAQ: How many Kindles can I use to access titles in my library? Most books you purchase from the Kindle store may be simultaneously accessed for your personal use on up to six Kindles or Kindle-compatible devices (such as Kindle for PC or Kindle for iPhone) registered to your Amazon.com account. If the limit is less than six simultaneous copies for a specific title, you'll see the message Simultaneous Device usage: Up to X simultaneous devices, per publisher limits on the Amazon.com detail page. If you reach the device limit and want to replace one of your current devices with a new one, you must first deregister and delete the content from the device you wish to replace before you can access the content in question from your new device. For device registration instructions, please see the Help page for the device you wish to register/deregister on the Kindle Support pages. Subscription content such as magazines and blogs can only be downloaded to one Kindle at a time and cannot be accessed from Kindle for PC.
So yes, you can read the ebook on multiple devices, but be aware that there is a limit of 6 (or fewer) devices."} +{"doc_id": 330132, "author": "userAsh", "text": "The above answers are good. However, as a beginner, I found them slightly difficult to understand, and upon searching further, I found a very useful link: Linux Diff Command & Examples The site explains the concept in a simple and easy to understand manner. The diff command is easier to understand if you consider it this way: essentially, it outputs a set of instructions for how to change one file to make it identical to the second file. Each of the following cases is explained well: a for add, c for change, d for delete"} +{"doc_id": 133526, "author": "GAThrawn", "text": "Some Kindle books have Lending enabled on them, so that you can loan them to another Kindle user for up to two weeks. The downside is that this can only be done once per book, and not all publishers allow their books to be lent out. More info here: Amazon.com Lending Kindle Books"} +{"doc_id": 100760, "author": "john", "text": "I had this same issue. I went into the Security & Privacy System Preferences pane, just to look at what Spotlight was including. I checked one thing off and then back on again, and it immediately stopped. Mine was using almost 100% CPU power and my fans were running nonstop. It all stopped after I did this and I have no idea why. I have a Mid 2012 MacBook Pro."} +{"doc_id": 100762, "author": "Paul", "text": "For anybody looking for this in 2018: use BetterTouchTool to make the Enter key simulate a \u2318 Command + \u2193 Down Arrow key press."} +{"doc_id": 68002, "author": "Lucky", "text": "In my experience, it depends on the version of your app as well. Generally, the first version takes longer, and if you have kept the features more or less the same, the next version is quickly approved. We submitted the first beta version of our app for review last month. It took one day for Apple to approve the first beta release. From that point onwards, all subsequent beta releases were instantly approved."} +{"doc_id": 133574, "author": "barrymac", "text": "The Asus EEE Pad Transformer is close to what you are after and available now. I like the way it also extends the battery life. I would wait for something with a bit more power for this purpose though. I have a feeling that using a single-core ARM processor with 512MB RAM, as in my case, would feel pretty slow as a desktop experience. Another thing to note is that Android won't support all the keyboard shortcuts that you are used to on a desktop. I hope this changes, because for me it is an obvious evolution. Indeed, the concept of a personal computing unit like this is something I have been waiting for for 15 years!"} +{"doc_id": 35289, "author": "jackslash", "text": "I was having difficulty finding the account picture cache on Mountain Lion 10.8.3. They were not found in ~/Library/Caches/com.apple.iChat/Pictures or ~/Library/Images/iChat Recent Pictures/ Presumably this is because iChat became Messages in Mountain Lion. So I used the command-line tool fs_usage to find them. By running the command sudo fs_usage -w | grep .tiff and then clicking and setting a new account picture, you can see the path to the file that is written to disk when you change account pictures.
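The same filtering is easy to script. A small Python sketch of the idea, assuming fs_usage is available and run with root privileges:

```python
import subprocess

# Stream fs_usage output (needs root) and print lines that mention .tiff files.
proc = subprocess.Popen(["sudo", "fs_usage", "-w"],
                        stdout=subprocess.PIPE, text=True)
for line in proc.stdout:
    if ".tiff" in line:
        print(line.rstrip())
```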
Turns out that in Mountain Lion 10.8.3 the recent account pictures have moved to: ~/Library/Containers/com.apple.ImageKit.RecentPictureService/Data/Library/Images/Recent Pictures/"} +{"doc_id": 100832, "author": "felixx", "text": "Force quitting the process usbd made this bug finally go away on my 2017 MacBook Pro."} +{"doc_id": 297447, "author": "OJFord", "text": "If, like me, you actually wanted both, each exactly once (this is actually 'either, twice'), then it's simple: grep -Ec 'thing1|thing2' and check for the output 2. The benefit of this approach (if exactly once is what you want) is that it scales easily."} +{"doc_id": 625156, "author": "Ron Maupin", "text": "SSH is an encryption protocol used for several things. Encrypting traffic in a VPN tunnel is one of them. Your traffic is encrypted using SSH, but it then needs to be wrapped in valid IP packets (a tunnel) to traverse a network like the Internet. This tunnel is the VPN. Basically, your employer blocks outside network traffic, for security, unless that traffic comes through a VPN which the employer controls. A VPN may or may not encrypt the contents of the tunnel. Using SSH will encrypt the traffic carried in the VPN tunnel."} +{"doc_id": 166410, "author": "xavier_fakerat", "text": "Not sure about the native Google Play Store, but you could use Yalp, or a fork of Yalp, Aurora (F-Droid link); it has many search filters, e.g. for ads, gratis, number of downloads etc. Aurora Store is an alternative (FOSS) client to Google's Play Store, with an elegant design. Using Aurora you can download apps, update existing apps, search for apps, get details about in-app trackers and much more. Select the filter and tap apply; your search results will match your criteria. Filters in Aurora. Disclaimer: I am not affiliated with Aurora, but I use it as a replacement for the Google Play Store client. Acknowledgements: XDA thread"} +{"doc_id": 166412, "author": "Izzy", "text": "There's the Playsearch website, which lets you do that without having to install anything beforehand: Playsearch website, your criteria marked. In addition to what you asked for, you can even specify how long ago the last update should be at maximum, what size the app should have, what Android version it should support, how well it ranks (stars) \u2013 and how widespread (number of installs) it should at least be."} +{"doc_id": 297488, "author": "fugitive", "text": "You can use rsync as an alternative. It is mainly for syncing files, but you can use it for this purpose as well. rsync -avzh --stats --progress remoteuser@remoteip:/path/ localpath To add ssh options: rsync -e \"ssh -p $port_value\" remoteuser@remoteip:/path/ localpath --progress and --stats are useful for a real-time display of the transfer. I think it is a better option than scp, since it skips already-transferred files, which is noticeable when you're copying a lot of files."} +{"doc_id": 166422, "author": "RockPaperLz- Mask it or Casket", "text": "There is a brand new website, still officially in beta, called AppFilter. It allows the user to search for apps, while filtering for ads, IAPs, paid/gratis, ratings, category, and more. It seems to work well, and currently has over a million apps indexed. It is free to use, and the author (no affiliation with me) is seeking donations to improve the server."} +{"doc_id": 625176, "author": "Jozef Woods", "text": "Typical reasoning is that you want to reduce the exposure and possible attack vectors as far as possible.
If you begin from the premise that both SSH and VPN are required (for their own purposes), then having both externally facing means that attackers have two potential routes into your environment. If you make SSH local-only, it adds an additional layer to the security of the server. Consider the following scenarios: SSH + VPN externally. An attacker need only compromise SSH to compromise the server. SSH external. Functionally the same as the previous scenario. VPN external (SSH internal). Doubles up on security. An attacker must break through both before they can do anything to the server. Consider that alongside the fact that a VPN would be necessary for other functions, and may be better configured for external access, and it's a no-brainer."} +{"doc_id": 625177, "author": "dr_", "text": "hostname is the correct term when referring to the name of a machine, as opposed to its IP address. From Wikipedia: In computer networking, a hostname (archaically nodename) is a label that is assigned to a device connected to a computer network and that is used to identify the device in various forms of electronic communication such as the World Wide Web, e-mail or Usenet. By server name or machine name is meant, well, the name (hostname) of the server or the machine. Note that the hostname (e.g. jupiter) usually doesn't include the domain name (e.g. example.org). Together, they form a FQDN (Fully Qualified Domain Name): jupiter.example.org. This is the most precise usage. However, for instance when talking about the World Wide Web as in the Wikipedia page you linked, jupiter.example.org is often called (somewhat incorrectly) a hostname. In the Yahoo question you linked, they're synonyms."} +{"doc_id": 625180, "author": "Ron Trunk", "text": "Hostname is used specifically in the context of the TCP/IP suite. Other protocols or operating systems (MS Windows) often use the term server name. So for example, a device could be referred to by its Server Name in Windows, but by its Hostname using DNS. The names can be different. Often the two terms are used interchangeably."} +{"doc_id": 199207, "author": "Bernhard", "text": "Your example works for me if you omit the braces: $ tar --extract --file=test.tar.gz extract11 If your file extract11 is in a subfolder, you should specify the path within the tarball: $ tar --extract --file=test.tar.gz subfolder/extract11"} +{"doc_id": 625192, "author": "dr_", "text": "The three-way handshake is necessary because both parties need to synchronize the segment sequence numbers used during their transmission. For this, each of them sends (in turn) a SYN segment with a sequence number set to a random value n, which is then acknowledged by the other party via an ACK segment with an acknowledgment number set to n+1."} +{"doc_id": 100904, "author": "rogerdpack", "text": "For some reason, when I clicked allow it felt as if the button did nothing, but when I tried it a third time the button took (after which point it always worked). Weird. More ideas: reboot into safe mode and try the allow button there; or from the command line: sudo spctl --master-disable This is a community wiki, feel free to add more ideas here."} +{"doc_id": 199208, "author": "harish.venkat", "text": "You can also use tar -zxvf You must write the file name exactly as tar ztf test.tar.gz shows it. If it says e.g. ./extract11, or some/bunch/of/dirs/extract11, that's what you have to give (and the file will show up under exactly that name; needed directories are created automatically). -x: instructs tar to extract files.
-f: specifies filename / tarball name. -v: Verbose (show progress while extracting files). -z: filter archive through gzip, use to decompress .gz files."} +{"doc_id": 625195, "author": "Eddie", "text": "Break down the handshake into what it is really doing. In TCP, the two parties keep track of what they have sent by using a Sequence number. Effectively it ends up being a running byte count of everything that was sent. The receiving party can use the opposite speakers sequence number to acknowledge what it has received. But the sequence number doesnt start at 0. It starts at the ISN (Initial Sequence Number), which is a randomly chosen value. And since TCP is a bi-directional communication, both parties can speak, and therefore both must randomly generate an ISN as their starting Sequence Number. Which in turn means, both parties need to notify the other party of their starting ISN. So you end up with this sequence of events for a start of a TCP conversation between Alice and Bob: Alice ---> Bob SYNchronize with my Initial Sequence Number of X Alice <--- Bob I received your syn, I ACKnowledge that I am ready for [X+1] Alice <--- Bob SYNchronize with my Initial Sequence Number of Y Alice ---> Bob I received your syn, I ACKnowledge that I am ready for [Y+1] Notice, four events are occurring: Alice picks an ISN and SYNchronizes it with Bob. Bob ACKnowledges the ISN. Bob picks an ISN and SYNchronizes it with Alice. Alice ACKnowledges the ISN. In actuality though, the middle two events (#2 and #3) happen in the same packet. What makes a packet a SYN or ACK is simply a binary flag turned on or off inside each TCP header, so there is nothing preventing both of these flags from being enabled on the same packet. So the three-way handshake ends up being: Bob <--- Alice SYN Bob ---> Alice SYN ACK Bob <--- Alice ACK Notice the two instances of SYN and ACK, one of each, in both directions. So to come back to your question, why not just use a two-way handshake? The short answer is because a two way handshake would only allow one party to establish an ISN, and the other party to acknowledge it. Which means only one party can send data. But TCP is a bi-directional communication protocol, which means either end ought to be able to send data reliably. Both parties need to establish an ISN, and both parties need to acknowledge the others ISN. So in effect, what you have is exactly your description of the two-way handshake, but in each direction. Hence, four events occurring. And again, the middle two flags happen in the same packet. As such three packets are involved in a full TCP connection initiation process."} +{"doc_id": 297516, "author": "Tom Kelly", "text": "Unless youre looking for a specific bit rate, Id recommend the -crf option. This is most commonly used for x264 encoding as described in this article. In short: a constant rate factor (CRF) of 23 would make a DVD quality movie (~700MB - 1GB) and lower CRF values would be higher quality (larger files). An example from the linked article: ffmpeg -i input.mp4 -c:v libx265 -crf 28 output.mp4"} +{"doc_id": 625201, "author": "Sergio", "text": "TCP connection is bidirectional. What this means is that it actually is a pair of one-way connections. The initiator sends SYN, the responder sends ACK: one simplex connection begins. Then the responder sends SYN, the initiator sends ACK: another simplex connection begins. Two simplex connections form one duplex TCP session, agree? 
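To make the three segments concrete, they can also be hand-crafted with a packet library. A hedged sketch using scapy, with a hypothetical target address and port values, run as root:

```python
from scapy.all import IP, TCP, sr1, send

# Hypothetical target host; sr1() sends our SYN and returns the SYN-ACK.
ip = IP(dst="192.0.2.10")
synack = sr1(ip / TCP(sport=40000, dport=80, flags="S", seq=1000))

# Our ACK acknowledges their ISN + 1, completing the three-way handshake.
ack = TCP(sport=40000, dport=80, flags="A",
          seq=synack[TCP].ack, ack=synack[TCP].seq + 1)
send(ip / ack)
```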
So logically there are four steps involved; but because SYN and ACK flags are different fields of the TCP header, they can be set simultaneously - the second and the third steps (of the four) are combined, so technically there are three packet exchanges. Each simplex (half-)connection uses a 2-way exchange, as you proposed."} +{"doc_id": 461364, "author": "Lars", "text": "I wrote a fairly lengthy post on this topic. Here is an excerpt: I thought about this problem for quite a while. I decided to write up my own personal solution as a general process. The steps I have documented are as follows: Create Vocabulary Sheet Learn the Application Browse Available Documentation Make Assumptions Locate Third Party Libraries Analyze Code This process is written in the context of a large desktop application, but the general techniques are still applicable to web applications and smaller modules. Taken from: A Process For Learning A New Codebase"} +{"doc_id": 625212, "author": "dragosb", "text": "First of all, the variable subnet mask technique did become insufficient. That is why people invented the Network Address Translation (NAT) technique, where you can use one public IP to mask multiple private IPs. Even with this technique, we are almost out of IPs to allocate. Also, NAT breaks one of the founding principles of the Internet: the end-to-end principle. So the main reason for using IPv6 is that everyone will have available as many public IPs as they need, and all the complexity of using NAT will disappear. IPv6 also provides other functionality that I will not go into in detail: mandatory security at the IP level, stateless address autoconfiguration, no more broadcasting (only multicasting), and more efficient processing by routers thanks to a simplified header. Also, in this age of mobile devices, it has explicit support for mobility in the form of mobile IPv6. Regarding your proposal of using subnets/subnet masks: it does not sound feasible, since its implementation would break all existing applications, and it is not really elegant. If you have to change things, why not go for something new and well thought out?"} +{"doc_id": 625214, "author": "Ron Maupin", "text": "The Internet Protocol (IP) was designed to provide end-to-end connectivity. The 32 bits of an IPv4 address only allow for about 4.3 billion unique addresses. Then you must subtract a bunch of addresses for things like multicast, and there is a lot of math showing that you can never use the full capacity of a subnet, so there are a lot of wasted addresses. There are about twice as many humans as there are usable IPv4 addresses, and many of those humans consume multiple IP addresses. This doesn't even touch on the business needs for IP addresses. Using NAT to satisfy the IP address hunger breaks the IP end-to-end connection paradigm. It becomes difficult to expose enough public IP addresses. Think for a minute what you, as a home user with only one public IP address, would do if you want to allow multiple devices using the same transport protocol and port, say two web servers, which by convention use TCP port 80, to be accessed from the public Internet. You can port forward TCP port 80 on your public IP address to one private IP address, but what about the other web server? This scenario will require you to jump through some hoops which a typical home user isn't equipped to handle.
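The constraint is analogous to the one two programs hit when they try to bind the same port on a single host; a quick Python illustration of why one address/port pair can lead to only one server:

```python
import socket

s1 = socket.socket()
s1.bind(("0.0.0.0", 8080))   # first "web server" claims the port
s1.listen()

s2 = socket.socket()
try:
    s2.bind(("0.0.0.0", 8080))   # a second server cannot share it
except OSError as err:
    print("second bind fails:", err)
```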
Now, think about the Internet of Things (IoT) where you may have hundreds, or thousands, of devices (light bulbs, thermostats, thermometers, rain gauges and sprinkler systems, alarm sensors, appliances, garage door openers, entertainment systems, pet collars, and who knows what all else), some, or all, of which want to use the same specific transport protocols and ports. Now, think about businesses with IP address needs to provide their customers, vendors, and partners with connectivity. IP was designed for end-to-end connectivity so, no matter how many different hosts use the same transport protocol and port, they are uniquely identified by their IP address. NAT breaks this, and it limits IP in ways it was never intended to be limited. NAT was simply created as a way to extend the life of IPv4 until the next IP version (IPv6) could be adopted. IPv6 provides enough public addresses to restore the original IP paradigm. IPv6 currently has 1/8 of the IPv6 addresses in the entire IPv6 address block set aside for globally routable IPv6 addresses. Assuming there are 17 billion people on earth in the year 2100 (not unrealistic), the current global IPv6 address range (1/8 of the IPv6 address block) provides over 2000 /48 networks for each and every one of those 17 billion people. Each /48 network is 65,536 /64 subnets with 18,446,744,073,709,551,616 addresses per subnet."} +{"doc_id": 625216, "author": "Sander Steffann", "text": "Two things are getting confused here: classful addressing vs CIDR Masquerading / NAT Going from classful addressing to Classless Inter Domain Routing (CIDR) was an improvement that made the address distribution to ISPs and organisations more efficient, thereby also increasing the lifetime of IPv4. In classful addressing an organisation would get one of these: a class A network (a /8 in CIDR terms, with netmask 255.0.0.0) a class B network (a /16 in CIDR terms, with netmask 255.255.0.0) a class C network (a /24 in CIDR terms, with netmask 255.255.255.0) All of these classes were allocated from fixed ranges. Class A contained all addresses where the first digit was between 1 and 126, class B was from 128 to 191 and class C from 192 to 223. Routing between organisations had all of this hard-coded into the protocols. In the classful days when an organisation would need e.g. 4000 addresses there were two options: give them 16 class C blocks (16 x 256 = 4096 addresses) or give them one class B block (65536 addresses). Because of the sizes being hard-coded the 16 separate class C blocks would all have to be routed separately. So many got a class B block, containing many more addresses than they actually needed. Many large organisations would get a class A block (16,777,216 addresses) even when only a few hundred thousand were needed. This wasted a lot of addresses. CIDR removed these limitations. Classes A, B and C dont exist anymore (since \u00b11993) and routing between organisations can happen on any prefix length (although something smaller than a /24 is usually not accepted to prevent lots of tiny blocks increasing the size of routing tables). So since then it was possible to route blocks of different sizes, and allocate them from any of the previously-classes-A-B-C parts of the address space. An organisation needing 4000 addresses could get a /20, which is 4096 addresses. Subnetting means dividing your allocated address block into smaller blocks. Smaller blocks can then be configured on physical networks etc. It doesnt magically create more addresses. 
It only means that you divide your allocation according to how you want to use it. What did create more addresses was Masquerading, better known as NAT (Network Address Translation). With NAT one device with a single public address provides connectivity for a whole network with private (internal) addresses behind it. Every device on the local network thinks it is connected to the internet, even when it isnt really. The NAT router will look at outbound traffic and replace the private address of the local device with its own public address, pretending to be the source of the packet (which is why it was also known as masquerading). It remembers which translations it has made so that for any replies coming back it can put back the original private address of the local device. This is generally considered a hack, but it worked and it allowed many devices to send traffic to the internet while using fewer public addresses. This extended the lifetime of IPv4 immensely. It is possible to have multiple NAT devices behind each other. This is done for example by ISPs that dont have enough public IPv4 addresses. The ISP has some huge NAT routers that have a handful of public IPv4 addresses. The customers are then connected using a special range of IPv4 addresses (100.64.0.0/10, although sometimes they also use normal private addresses) as their external address. The customers then again have a NAT router that uses that single address they get on the external side and performs NAT to connect a whole internal network which uses normal private addresses. There are a few downsides to having NAT routers though: incoming connections: devices behind a NAT router can only make outbound connections as they dont have their own real address to accept incoming connections on port forwarding: this is usually made less of a problem by port forwarding, where the NAT router dedicates some UDP and/or TCP ports on its public address to an internal device. The NAT router can then forward incoming traffic on those ports to that internal device. This needs the user to configure those forwardings on the NAT router carrier grade NAT: is where the ISP performs NAT. You wont be able to configure any port forwarding, so accepting any incoming connections (bit torrent, having your own VPN/web/mail/etc server) becomes impossible fate sharing: the outside world only sees a single device: that NAT router. Therefore all devices behind the NAT router share its fate. If one device behind the NAT router misbehaves its the address of the NAT router that ends up on a blacklist, thereby blocking every other internal device as well redundancy: a NAT router must remember which internal devices are communicating through it so that it can send the replies to the right device. Therefore all traffic of a set of users must go through a single NAT router. Normal routers dont have to remember anything, and so its easy to build redundant routes. With NAT its not. single point of failure: when a NAT router fails it forgets all existing communications, so all existing connections through it will be broken big central NAT routers are expensive As you can see both CIDR and NAT have extended the lifetime of IPv4 for many many years. But CIDR cant create more addresses, only allocate the existing ones more efficiently. And NAT does work, but only for outbound traffic and with higher performance and stability risks, and less functionality compared to having public addresses.
Which is why IPv6 was invented: Lots of addresses and public addresses for every device. So your device (or the firewall in front of it) can decide for itself which inbound connections it wants to accept. If you want to run your own mail server that is possible, and if you dont want anybody from the outside connecting to you: thats possible too :) IPv6 gives you the options back that you used to have before NAT was introduced, and you are free to use them if you want to."} +{"doc_id": 625236, "author": "Kevin Keane", "text": "The biggest concern is likely to identify where your bottlenecks are going to be, in terms of route aggregation. The basic parameters are likely going to be: each subnet must be a /64 (dictated by IPv6), and you have a /60, /56, or /48 to play with. As others have said, a /48 gives you 64k subnets, but its still easy to paint yourself into a corner if you just assign them randomly. Lets say you have 1000 store locations, and give each one a /64 sequentially from the start. Then you find out that the 43rd store needs a second subnet - that means, either renumbering that network, or giving the store two separate subnets that cant be aggregated. Incidentally, in the IPv4 world, you also get 64k subnets if you use the 10.x.x.x network and subnet it to /24s. Some of the practices you use in that scenario may translate nicely. One company I work for uses 10.x.x.x internally for about 150 branch offices (with some 100-500 computers at each location). The second byte is the branch number, and they use /22 instead of /24 for their subnets. So each branch office can have up to 64 subnets, which works nicely for them."} +{"doc_id": 133719, "author": "user6014", "text": "I have a Samsung Charge, and was able to disable the annoying Battery Full notification by using Tasker. As others have stated, the Tasker solution I used is not specific to disabling only the BatteryFull notification when reached, but it does limit the amount of time notifications are disabled and when. What I did with Tasker: Contexts-Power, Time between 11p-6a, Battery between 99-100% ...Task - disable notification Contexts - Power, Time between 11p-6a, Battery Full ... Task - enable notification. This should limit the amount of time notifications are disabled to only a few minutes, since it only takes that long to charge the battery from 99 to 100%."} +{"doc_id": 199256, "author": "BlueBomber", "text": "You can try Ctrl+L. It clears and/or redraws the terminal screen, depending on the program."} +{"doc_id": 166489, "author": "Trevor", "text": "This should help some folks and it does enable better control, not louder volume. Go to developer options (enable developer options if you havent previously done so). Then, turn on Disable absolute volume. You may need to restart your Android device for it to take effect. The steps now are way more reasonable. In other words, it answers your Note, as now the volume (at least on my Bluetooth headphones) now has much smaller steps -- it no longer goes from too quiet to too loud in one step."} +{"doc_id": 100956, "author": "Matthew Briggs", "text": "An excellent command line tool that works very well is tifig. It can be easily compiled on multiple platforms. It should compile on any platform with a relatively modern development toolchain, that should include El Capitan!"} +{"doc_id": 35427, "author": "Lri", "text": "One option would be to Cut and paste using \u2318-ALT-X and \u2318V after resizing the canvas size in destination file. 
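The subnet arithmetic in the addressing answers above can be checked with Python's standard ipaddress module; the prefixes below are documentation/example ranges, not real allocations:

```python
import ipaddress

# IPv4: the classful-vs-CIDR arithmetic. An organisation needing 4000
# addresses wastes most of a class B but fits a /20 almost exactly.
print(ipaddress.ip_network("172.16.0.0/16").num_addresses)  # 65536
print(ipaddress.ip_network("172.16.0.0/20").num_addresses)  # 4096

# IPv6: carving /64 subnets out of a /48, as in the planning answer.
# 2001:db8::/32 is the documentation prefix; this particular /48 is made up.
site = ipaddress.ip_network("2001:db8:abcd::/48")
print(2 ** (64 - 48))                      # 65536 possible /64 subnets
print(next(site.subnets(new_prefix=64)))   # 2001:db8:abcd::/64
```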
The pasted image doesnt seem to get snapped to canvas edges now. Increasing canvas size by cropping doesnt seem to be possible."} +{"doc_id": 625255, "author": "Ron Maupin", "text": "You have to remember that models like OSI are just that, models. They are theoretical. The real world doesnt fall neatly into these models. For the most part, routing is a layer-3 function, but, as you pointed out, BGP uses a layer-4 protocol to communicate with other BGP speakers in order to do what is normally considered a layer-3 function. Many network protocols fall into a gray area, or are considered in one layer while using another layer. Take ARP for instance. It resolves layer-3 addresses to layer-2 addresses. Which layer should it be considered to be in? Understanding the models is useful, but the models are not mandated by any organization, and you are free to create protocols and functions that do not follow any model."} +{"doc_id": 199271, "author": "Eric", "text": "Even though GCC is GPLv3, the resulting binaries produced by GCC never had any license constraint. In clear you can use GCC to build software that falls under the license you want. Even the C library that comes with GCC and that is included in the binary is license-free. http://www.gnu.org/licenses/gcc-exception-faq.html Section 2 of the GNU GPLv3: You have permission to propagate a work of Target Code formed by combining the Runtime Library with Independent Modules, even if such propagation would otherwise violate the terms of GPLv3, provided that all Target Code was generated by Eligible Compilation Processes. You may then convey such a combination under terms of your choice, consistent with the licensing of the Independent Modules. \u201cEligible means that the compilation does not involve both GCC and GPL-incompatible software. Thats not a restriction: BSD-licensed software can be used in the build process involving GNU GCC. As you can see, contrary to what has been said above, there is no REAL license-related reason to move away from GCC as there is no incompatibility with using GCC inside FreeBSD. The real reason behind this change is political and opportunistic: BSD has its own licensing which philosophically competes with the GNU Public license (as *ire_and_curses* explained above), CLANG is a new non-GPL compiler initiated by a sponsor of FreeBSD that appears to be technically equivalent to the GPL-licensed GCC (as described above by *ire_and_curses*). These facts creates an opportunity for FreeBSD to move away from GCC and get rid of it: theyre not actually legally compelled to, as they could well still use GCC to build free or BSD-licensed software, but they want to stick to the all BSD licensed software philosophy."} +{"doc_id": 625257, "author": "Xavier Nicollet", "text": "BGP is on top of TCP, so it would be Internet layer 4, OSI layer 7. Usually external BGP is done only between 2 directly connected peers, enforced by setting TTL flag on IP header, which is located at layer 3."} +{"doc_id": 133741, "author": "jlehenbauer", "text": "TubeMate YouTube Downloader is a great app that will let you download either the audio or the video from a YouTube file and save it to your device. When it is on your device, you are then able to use a media player (such as DoubleTwist) to play either in the background. 
But as for straight background music from YouTube, thats a no-go :/"} +{"doc_id": 363118, "author": "J11", "text": "using journalctl write logs to a text file and read it bottom up journalctl -u service-name.service > file_name.txt tail file_name.txt"} +{"doc_id": 264824, "author": "faelx", "text": "For me (on Mac OS X 10.9.5), adding the path name (e.g. /mypathname) to the file /etc/paths worked very well. Before editing, echo $PATH returns: /usr/bin:/bin:/usr/sbin:/sbin:/usr/local/bin After editing /etc/paths and restarting the shell, the $PATH variable is appended with /pathname. Indeed, echo $PATH returns: /usr/bin:/bin:/usr/sbin:/sbin:/usr/local/bin:/mypathname What happened is that /mypathname has been appended to the $PATH variable."} +{"doc_id": 559740, "author": "Jeff Clayton", "text": "Not at all a guarantee. HTTPS means that the web page has SSL, which simply means that your connection to the page is encrypted. The content on the page could be anything that could be posted on any web site whether encrypted by SSL or not. Additionally, as listed in the answers in the comments below, you can be fooled into a false sense of security when (in different types of examples) the target server is compromised, or a hacker redirects your https site data to a different https encrypted location. You can still be encrypted to a site, but possibly even a fake site that looks like the real one instead."} +{"doc_id": 559741, "author": "Xander", "text": "No, HTTPS does not necessarily mean that a site is not malicious. HTTPS means very little as to the security of a site. Its specifically geared to keep your communication with the site secure from eavesdroppers and tampering, but offers nothing as to the security of the site itself. Yes, a site serving content over HTTPS has a certificate. That means that the individual who requested the certificate from the CA has an email address that is associated with the domain. Except in the case of Extended Validation certificates (the ones that offer a green address bar) this is literally all it means. Nobody from the CA is validating that the site is safe, secure, and not serving malware. Any site, with an SSL cert or without, can have bugs and vulnerabilities that allow an attacker to leverage them to serve an exploit. Or a admin or user who has the ability to either maliciously or unknowingly cause the site to serve malware. Even if the site itself does not, if it serves advertisements (or any other content, for that matter) from an ad network or another site, that could be vulnerable. So, HTTPS means that nobody should be able to view or tamper with your traffic. That is all that it means."} +{"doc_id": 133756, "author": "Bryan Denny", "text": "MightyText is a Google Chrome extension/Android app that will let you compose, send, and receive text messages in Google Chrome by connecting to your phone. So it just forwards received messages from your phone to Chrome, and sent messages from Chrome to your phone."} +{"doc_id": 363139, "author": "champion-runner", "text": "To remove a single line from the history file, use the -d option. For example, if you want to clear a command where you entered the clear-text password as in the scenario above, find the line number in the history file and run this command. $ history -d 2038 To delete or clear all the entries from bash history, use the history command below with the -c option. 
$ history -c Alternatively, you can use the command below to delete the history of all last executed commands permanently in the file. $ cat /dev/null > ~/.bash_history Also, with Bash 5 you can delete a range as well: history -d 511-520"} +{"doc_id": 363142, "author": "champion-runner", "text": "Use journalctl to View Your Systems Logs View journalctl without Paging To send your logs to standard output and avoid paging them, use the --no-pager option: journalctl --no-pager It\u2019s not recommended that you do this without first filtering down the number of logs shown. journalctl -u service-name.service Show Logs within a Time Range Use the --since option to show logs after a specified date and time: journalctl --since 2018-08-30 14:10:10 Use the --until option to show logs up to a specified date and time: journalctl --until 2018-09-02 12:05:50 Combine these to show logs between the two times: journalctl --since 2018-08-30 14:10:10 --until 2018-09-02 12:05:50 More info"} +{"doc_id": 559752, "author": "h22", "text": "Yes, it can easily be - malicious JavaScript or viruses can be transferred over HTTPS as easily as over HTTP, no problem. It may be somewhat less likely as the source of the valid verified HTTPS message is known. However it still may happen if the HTTPS site has had a security hole, has been attacked, compromised and malicious content has been installed on it. It will not be for long; soon the administrator will find out one way or another and remove the malware. However I would prefer to avoid trusting the content just because it was delivered over HTTPS."} +{"doc_id": 559769, "author": "Hutch", "text": "In addition to the other points raised, its worth mentioning that even a trusted site (for example, your bank) could still be infected by a virus that makes it behave maliciously. So even if you trust the organization, https still does not guarantee that the website doesnt do malicious things."} +{"doc_id": 101022, "author": "fsb", "text": "This can happen for a number of reasons and you might not know why. First, do the easy stuff, like you already did. Search with Spotlight, check for a new Home screen by swiping to the right, and check all your folders. If those dont work, try a hard reset. After you restart the iPhone, search for the app again. If the app is still missing, delete the app and reinstall it from the App Store. To delete the app (in iOS 11), go to Settings -> General -> iPhone Storage and find the app. Tap the app and on the next screen select Delete App. After the apps deleted, go back to the App Store and redownload the app again. Hopefully, it will install correctly this time and youll be able to see it on a Home screen."} +{"doc_id": 395935, "author": "Chris", "text": "This strictly depends on the company. But the words are fairly obvious: entry level is someone who is just entering the field, junior is someone who is beyond entry level and knows their way around a few languages/technologies. Lastly, senior are those who are more in charge of the project and sit at a higher level. They can usually delegate as much as they code."} +{"doc_id": 395936, "author": "Walter", "text": "Its going to boil down to the companys expectations of the programmer. If Im the hiring company and I hire an Entry Level programmer, I know that that person knows next to nothing and were going to assume he/she needs to learn everything. If I hire a Senior level person, in theory, theyll be self sufficient, from a technical standpoint."} +{"doc_id": 395937, "author": "Steven A. 
Lowe", "text": "Entry Level - must give them explicit instructions, check everything they do, little or no design responsibility, no analysis responsibility Junior - less explicit instructions, less checking, some minor design and analysis responsibility; helps the entry-level people find the compiler and use the repository Senior - major design and analysis responsibility, is expected to correct oversights on his/her own, little/no checking, little/no instructions; helps the junior-level people learn/improve analysis and design skills"} +{"doc_id": 395938, "author": "Wonko the Sane", "text": "As most have said, it varies from company to company, and job to job. For instance, I once worked at a company that considered anybody who worked there for more than 5 years a Senior Software Engineer. At the other extreme, another place I worked had very stringent definitions (and associated pay scales). Some places may consider entry level and junior to mean the same thing. It can vary based on years of experience, general knowledge, specific knowledge (i.e. knowing the features of a certain language), managerial and/or leadership experience, all combinations thereof, and certainly much more."} +{"doc_id": 199327, "author": "Marcus", "text": "If you want to get rid of Removing leading `/ from member names being printed to STDERR, but still want to leave off those leading slashes as tar wisely does by default, I saw an excellent solution here by commenter timsoft. The solution involves using -C option to change directory to the root (/), then specifying the file tree to archive without a leading slash, because now you only need a relative path. This does the same thing as a normal tar create command, but no stripping is needed: tar fcz bkup.tar.gz -C / home/foo/"} +{"doc_id": 395941, "author": "MattC", "text": "Im going to go with the really simple answer here: senior developers (in general) are people that can see the forest for the trees. They think beyond just the immediate issue in front of their faces and have an understanding of what architectural changes could or should happen as a result of fixing the problem they are faced with. Most software companies Ive seen have the entry level people doing the day to day coding while the senior devs are overseeing what the new people are doing and tackling the really ugly, thorny issues. Obviously this is just my opinion, and not a hard-and-fast rule. YMMV."} +{"doc_id": 395945, "author": "Todd Williamson", "text": "I think the old school craftsman slots of apprentice, journeyman and master fit into these slots well for entry level, junior (or just no prefix) and senior. Someone entry level is given relatively simple tasks that do not have profound consequences and their work is checked by a junior or senior. Over time they get more responsibility and are given more complex tasks, learning the ropes along the way. At a junior (or just the removal of entry level / junior from the title / description) you have completed your apprenticeship and have covered the major areas of development for your company, having dipped into each major area so that you are familiar with each of them. You now help provide guidance and input for the apprentice equivalent, but your own work is still reviewed by the master / senior, though perhaps not as much as when you were a junior. Over time and the delivery of successful projects you eventually become senior. 
At a senior level you have mastered everything that is covered in your area, covering the entire development process and all the tools and technologies that are involved. You are empowered to make significant technical decisions and are expected to provide insight to management into the software development process. So, given those guidelines you should be able to look at a person or a position and determine which of the three bins they land in."} +{"doc_id": 395959, "author": "Craig", "text": "Really, I think it just comes down to how long you have been on the job. If you have 10 years experience you are a senior dev, if you are a graduate then you are probably entry level. I have seen many senior devs who could hardly code and didnt really know what they were doing and many junior devs who were fantastic."} +{"doc_id": 199353, "author": "Hitesh Chechani", "text": "In a terminal, press Ctrl+G and then Enter"} +{"doc_id": 330426, "author": "Optimus Prime", "text": "Instead of setting profile, what helped me was setting the PATH. Some of the commands were not available in my cron scripts as the PATH is different. ENVIRONMENT=prod PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin */30 * * * * /opt/myscript1.sh */30 * * * * /opt/myscript2.sh */30 * * * * /opt/myscript3.sh Setting PATH with the path of commands helped me. Even better if you can use a template and detemplatize later, ENVIRONMENT={{ENVIRONMENT}} PATH={{PATH}} */30 * * * * /opt/myscript1.sh */30 * * * * /opt/myscript2.sh */30 * * * * /opt/myscript3.sh Passing variables to each item looks messy."} +{"doc_id": 297662, "author": "Lonniebiz", "text": "For me, the issue was that I was logged in more than once (via ssh) and on one of the logins I was at command prompt where the pwd was inside a folder subordinate to the mount-point."} +{"doc_id": 264894, "author": "Wolf Halton", "text": "This was confusing, and has 2 possible good answers, depending on whether the user is attempting to add a ssh key on a Linux or on Windows (as I am). This probably doesnt answer the OP, but is an expansion for git-bash. I run both Windows and Ubuntu for development, and my git installation is slightly different in each. Try this: go to C:\\$Installation_Folder$\\Git\\cmd and execute: start-ssh-agent It will open a cmd command and run the ssh-agent the right way. .. was a good Windows answer, but failed to specify that you were expected to go through Windows Explorer to find the Git installation folder, and run the the Windows shell would open on completion of step 1. go to C:\\$Installation_Folder$\\Git\\cmd Step 2: you just need to double-click start-ssh-agent On step 3, you go back to git-bash or whichever *nix terminal emulator you are running and run ssh-add. If you used the default name for the ssh public key, you dont have to add the name as ssh-add uses that automatically."} +{"doc_id": 35527, "author": "Blake", "text": "Before iOS 10, the only method that existed would be possible with a jailbroken device as Apple didn\u2019t offer this feature before then."} +{"doc_id": 297673, "author": "Vouze", "text": "The main difference is in the error handling. In the following case the error is reported $ /bin/cat < z.txt -bash: z.txt: No such file or directory $ echo $? 1 In the following case the error is not reported. $ cat z.txt | /bin/cat cat: z.txt: No such file or directory $ echo $? 
0 With bash, you can still use PIPESTATUS: $ cat z.txt | /bin/cat cat: z.txt: No such file or directory $ echo ${PIPESTATUS[0]} 1 But it is available only immediately after the execution of the command: $ cat z.txt | /bin/cat cat: z.txt: No such file or directory $ echo $? 0 $ echo ${PIPESTATUS[0]} 0 # oops ! There is another difference, when we use shell functions instead of binaries. In bash, functions that are part of a pipeline are executed in sub-shells (except for the last pipeline component if the lastpipe option is enabled and bash is non-interactive), so the change of variables has no effect in the parent shell: $ a=a $ b=b $ x(){ a=x;} $ y(){ b=y;} $ echo $a $b a b $ x | y $ echo $a $b a b $ cat t.txt | y $ echo $a $b a b $ x | cat $ echo $a $b a b $ x < t.txt $ y < t.txt $ echo $a $b x y"} +{"doc_id": 559825, "author": "sebastian nielsen", "text": "I think people here do not understand the question: if you have an unsafe line, and you make a successful SSH/SSL connection over this line, the question is whether its safe to assume that the line is secure and that unencrypted data can be passed ALONGSIDE the encrypted connection (eg, in plain sight, not inside the encrypted SSL/SSH connection). I would say no. In this case, there could be a passive eavesdropper that simply ignores encrypted data and saves unencrypted data. BUT you can be sure theres no active eavesdropper (MITM), which means you can safely establish an unauthenticated SSL/SSH connection with the same source/destination as the authenticated line. This is provided theres no selective eavesdropper that MITMs certain connections; however, the eavesdropper cannot know if you are going to authenticate the connection or not, so he cannot know which connection to MITM to evade detection. The MITMer would, if he MITMs at all, MITM all connections and simply hope people click through the authentication dialogs. Thus: if you connect authenticated to a SSL service from lets say 123.123.123.123 to 24.24.24.24, you can also safely connect a SSH client from 123.123.123.123 to 24.24.24.24 without mutually authenticating the SSH fingerprint, provided you can trust everything behind the other sides NAT router or firewall. But even if thats generally safe, there IS a small risk that an eavesdropper simply MITMs random connections and hopes to avoid detection, so since you already have an authenticated connection to the target IP, why not use that authenticated connection to mutually verify the SSH fingerprint? Its as simple as posting the correct SSH fingerprint on an SSL-secured website!"} +{"doc_id": 559826, "author": "Mike Samuel", "text": "No. Traffic analysis can still tell someone a lot. Traffic analysis is the process of intercepting and examining messages in order to deduce information from patterns in communication. It can be performed even when the messages are encrypted and cannot be decrypted. In general, the greater the number of messages observed, or even intercepted and stored, the more can be inferred from the traffic. TLS is usually deployed to preserve confidentiality -- an attacker should not reach a high level of confidence about the contents of communication. Assuming that: an attacker knows your protocol; an attacker knows who is communicating with whom; the attacker cannot decrypt messages; and 
you do not obscure your real traffic in a lot of nonsense traffic (chaff) -- then an attacker can probably tell when you are awake and when you are asleep regardless of protocol, and may be able to tell a lot more depending on the nature of the protocol youre using. If your protocol is very simple: You send a message (fire the nukes at ...) when you want to fire nukes. You dont send a message when you dont want to fire any nukes. An eavesdropper who cant decrypt your data can determine by the mere presence of a message that you want to fire nukes, though maybe not at whom. If your protocol is more complex: You ask for a book. I send you the content. An attacker may not be able to tell who is reading War and Peace vs Atlas Shrugged, but can distinguish, based purely on message size, whether they are reading one of the former vs. Kafkas 55 page novel The Metamorphosis."} +{"doc_id": 232147, "author": "Ashish Saini", "text": "$ whoami This command prints the current user. To change users, we will have to use this command (followed by the users password): $ su secondUser Password: After entering the correct password, you will be logged in as the specified user (which you can check by rerunning whoami)."} +{"doc_id": 297681, "author": "ks1322", "text": "This feature request is not yet implemented in youtube-dl. See issue #622 and its many duplicates on github."} +{"doc_id": 101078, "author": "user3063895", "text": "Yes, you can do it: https://community.bose.com/t5/Headphones-Archive/Share-Audio-from-MacOS-Macbook-Pro-with-two-or-more-Bluetooth/td-p/46007 Here are the steps I took: Connect both headphones via bluetooth. Both should say connected on the Bluetooth setting page. In Finder menu hit Go -> Utilities -> Audio Midi Setup You should see both your headphones listed on the left here. Hit the + button at the bottom left and select Create Multi-Output Device (You may rename the device if you wish) Select your bose headphones from the list on the right to add to the Multi-Output Device. Right-click on the Multi-Output Device you just created and use device for sound output and play alerts and sound through this device. You may also select which device to use as your master device to sync across all your connected headphones. I also selected Drift Correction for slave devices - Im not 100% sure what this does but I think it makes sure that your devices stay in sync to the audio/video playing on your computer. Watch away. Note that you wont be able to adjust output volume from the Macbook any more; instead volume is adjusted individually at each headphone, which is pretty neat."} +{"doc_id": 330473, "author": "Bram", "text": "No dependencies, one C file, one header file: imcat Works on linux, macos, windows. Automatically scales to terminal width, with quality down-sampling. Remark: seriously? Down-voted? Next time, maybe explain in a comment why."} +{"doc_id": 396026, "author": "lord-fu", "text": "Simply put, and from personal observations found on job posting sites, and only regarding experience levels: Entry = Youre new; probably your first job. Junior = Youre good but not supposed to be the best; you also usually have less than 5 years and more than 2 years experience. Senior = You are supposed to be the best and do have more than 5 years experience."} +{"doc_id": 68353, "author": "AlessioX", "text": "If you want some free apps, you might want to check this out: iStat Pro Widget 4.92. 
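Mike Samuels size-leak point above can be made concrete with a few lines of Python. The catalogue sizes and the overhead figure here are invented for illustration:

```python
# Hypothetical catalogue of document sizes in bytes (invented numbers).
catalogue = {
    "War and Peace": 3_200_000,
    "Atlas Shrugged": 3_100_000,
    "The Metamorphosis": 120_000,
}

def guess_title(ciphertext_len, overhead=4096):
    """Guess which document was fetched from ciphertext size alone."""
    return min(catalogue,
               key=lambda t: abs(catalogue[t] + overhead - ciphertext_len))

# An eavesdropper sees ~125 KB of TLS records and needs no decryption:
print(guess_title(125_000))   # The Metamorphosis
```

No decryption happens anywhere; the observer matches nothing but lengths.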
Its the same as iStat Menus suggested by @Mikey T.K., but: pros: free cons: discontinued And its not a proper app: as its name suggests its a widget, so youll find it in the Dashboard."} +{"doc_id": 101147, "author": "criscom", "text": "If you are using Keyboard Maestro simply create a macro in the Global Macro Group (if you want to have the macro available globally) as follows: Create new macro Give the macro a name like insert \u2318 Add Typed String Trigger: =cmd Add New Action and choose Insert Text by Pasting Now whenever you type the string =cmd a \u2318 will be inserted."} +{"doc_id": 2847, "author": null, "text": "If the plist file is in the XML format, you can edit it in any text editor like TextEdit. If the plist file is in the binary format, you can convert it to XML first by running: plutil -convert xml1 file.plist If you want to go back to binary format after editing: plutil -convert binary1 file.plist If you have Xcode 4.3 or later, you can use it to edit property lists in a graphical editor like this: Xcode 4.2 and earlier came with a separate application for editing property lists (/Developer/Applications/Utilities/Property List Editor.app/)."} +{"doc_id": 2850, "author": "SeniorShizzle", "text": "A PList file, like Mankoff said, is just a specially formatted XML file, so you can actually edit and create them in a program like TextEdit. Because of the specific nature of PLists to Development, however, using a special program like Xcode or Property List Editor becomes a much more fruitful endeavor. This is because it not only automatically formats the XML code for you, but it will actually translate the key identifiers and layers into readable words, and also for some values it will provide a drop-down menu to fill in the correct responses. Especially when dealing with iPhone plists, when multiple runtime variables can be set using the Plist, easily creating new fields and knowing what to put in them makes it so much easier. You can get both Xcode and PList Editor from the Apple Developer website http://developer.apple.com for free by downloading the latest Xcode release."} +{"doc_id": 101165, "author": "Guglie", "text": "You can press Control + t while the dd command is running or for a nice progress bar you can install pv (pipe viewer) via Homebrew: brew install pv and then execute: sudo dd if=disk-image.img | pv | sudo dd of=/dev/disk2 or (knowing size of the image, 16GB in this example): dd if=disk-image.img | pv -s 16G | dd of=/dev/disk2 Example output 2: (data transferred, elapsed time, speed, progress bar and estimated time): 1.61GiB 0:12:19 [2.82MiB/s] [===> ] 10% ETA 1:50:25"} +{"doc_id": 101168, "author": null, "text": "First of all, install Homebrew Package Manager. Then you have to install pv and dialog with this command: brew install pv dialog You can then run this command to get a progress bar: dd if=disk.img bs=1m | pv disk.img | dd of=/dev/diskX bs=1m but make sure to replace disk.img with the path to the image and diskX with your SD cards disk identifier. If you want something more graphical, you can try this: (dd if=disk.img bs=1m | pv -n disk.img | dd of=/dev/diskX bs=1m conv=notrunc,noerror) 2>&1 | dialog --gauge Writing image to SD card... 10 70 0 Source: https://askubuntu.com/a/516724/765767"} +{"doc_id": 232253, "author": "Pierre-Olivier Vares", "text": "Hardlink creation on directories would be unrevertable. 
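The plutil conversions described a few answers back can also be done from Python with the standard plistlib module; file names here are placeholders:

```python
import plistlib

# Read a plist regardless of whether it is XML or binary.
with open("file.plist", "rb") as f:          # placeholder file name
    data = plistlib.load(f)

# Write it back as XML (the hand-editable form)...
with open("file-xml.plist", "wb") as f:
    plistlib.dump(data, f, fmt=plistlib.FMT_XML)

# ...or as binary, mirroring `plutil -convert binary1`.
with open("file-bin.plist", "wb") as f:
    plistlib.dump(data, f, fmt=plistlib.FMT_BINARY)
```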
Suppose we have: /dir1 \u251c\u2500\u2500this.txt \u251c\u2500\u2500directory \u2502 \u2514\u2500\u2500subfiles \u2514\u2500\u2500etc I hardlink it to /dir2. So /dir2 now also contains all these files and directories. What if I change my mind? I cant just rmdir /dir2 (because it is non-empty). And if I recursively delete in /dir2... it will be deleted from /dir1 too! IMHO its a largely sufficient reason to avoid this! Edit: Comments suggest removing the directory by doing rm on it. But rm on a non-empty directory fails, and this behaviour must remain, whether the directory is hardlinked or not. So you cant just rm it to unlink. It would require a new argument to rm, just to say if the directory inode has a reference count > 1, then only unlink the directory. Which, in turn, breaks another principle of least surprise: it means that removal of a directory hardlink I just created is not the same as removal of a normal file hardlink... I will rephrase my sentence: Without further development, hardlink creation would be unrevertable (as no current command could handle the removal without being incoherent with current behaviour). If we allow more development to handle the case, the number of pitfalls, and the risk of data loss if youre not aware enough of how the system works, such a development implies, is IMHO a sufficient reason to restrict hardlinking on directories."} +{"doc_id": 2880, "author": "Josh Hunt", "text": "The only way to play the music from your iPod or iPhone is to set iTunes to Manually manage music and videos on your device. To do this just select your iPhone under Devices on the left, and make sure Manually manage music and videos is selected. Click Apply, and now your music should not be greyed out and you are free to play it. You should then be able to use Apples Remote iPhone application to control iTunes from your device. If you have all your music (and everything else) backed up already, just try it. iTunes can manage iPods funny sometimes and can do strange things (such as warnings like you describe that dont amount to anything). I have done this before to watch a video on another and no content was removed from my iPod touch at all."} +{"doc_id": 2881, "author": null, "text": "You can do this with iTunes itself. Enable manual sync mode and you can then read right off the disk for playback. Here is an article with further details."} +{"doc_id": 2882, "author": null, "text": "Youre talking about DAAP, which is what is used to present and stream music in iTunes (although iTunes also has encryption & friends, but well leave that out of the conversation for the moment). I think that this article on an app that allows you to turn your iPhone into a DAAP server might be of considerable interest to you. As in, it allows you to stream your music from your iPhone to another DAAP-capable client, like iTunes...or Banshee...or Rhythmbox... Anyways, if that doesnt work, you at least know what youre looking for...and thats half the battle. Find a DAAP server for iPhone and youre pretty much set."} +{"doc_id": 330563, "author": "jox", "text": "For a colored git diff piped into less, this works: git -c color.diff=always diff [...] | less -R"} +{"doc_id": 2884, "author": null, "text": "I know Im replying to an old question, but I came in here googling. My iPhone 4 had the same behavior, I just updated to iTunes 10 and my music is no longer greyed out. 
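The reference-count bookkeeping behind the hardlink answer above can be observed for regular files with os.link; paths here are temporary and illustrative, and the final call shows the OS refusing directory hardlinks outright:

```python
import os
import tempfile

with tempfile.TemporaryDirectory() as d:
    src = os.path.join(d, "this.txt")
    open(src, "w").close()

    # A second name for the same inode bumps the link count to 2.
    os.link(src, os.path.join(d, "alias.txt"))
    print(os.stat(src).st_nlink)              # 2

    # Directories are refused outright, which avoids the undeletable
    # cycle described in the answer above.
    try:
        os.link(d, os.path.join(d, "loop"))
    except OSError as exc:
        print(exc)                             # typically EPERM
```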
In essence: upgrade to iTunes 10 and you should be able to do this."} +{"doc_id": 232261, "author": "pratik", "text": "For testing if variable is empty or contain spaces, you can also use this code: ${name:?variable is empty}"} +{"doc_id": 2886, "author": "Doug Harris", "text": "Download and run GrandPerspective for a nice graphical view of whats occupying disk space -- something like this: Run this before and after the reboot and you should be able to see what the big differences are."} +{"doc_id": 2888, "author": "Chealion", "text": "It sounds like the space recovered is from your swap (vm) and sleepimage files. Restarting will clear the files in /var/swap/ which can grow considerably if youre running out of RAM or are using a laptop. If you are using a laptop youll find a file called sleepimage that is the size of the amount of RAM you have installed and it can be safely deleted - that said deleting anything from here is temporary. If you need more space than I suggest using applications (as suggested) like Grand Perspective or Disk Inventory X to find large files. You can also remove additional language translations by hand or using an application like Monolingual - for example as an extreme iWeb drops from nearly 1GB to ~150MB after running Monolingual. If you want to turn off FileVault you can follow Apples instructions: by simply unchecking FileVault in the Security Preference Pane in System Preferences."} +{"doc_id": 133960, "author": "Abhirup Manna", "text": "MyphoneExplorer is a desktop application which will help you send sms right from your desktop [requires phone to be connected either by usb wire/Bluetooth/wifi]. It is very easy to use and also numerous other features [for other features check website]. To get started you will need to download the desktop app from the website and also the android app which is available in the market for free."} +{"doc_id": 428881, "author": "anon", "text": "(given you have equal familiarity with both languages) Go with C++ unless there is no C++ compiler for your platform. You can write C++ code without whatever portion of the language you dont like (no classes, exceptions, virtual inheritance, whatever personal restrictions you wish to apply), and then at some time in the future, if you decide you want some of those features after all, then you can easily use them. Nothing in C++ prevents you from writing C-style code. (given equivalent toolsets and developer knowledge) There is no reason to choose C over C++ provided your platform has a C++ compiler. You can simply limit yourself to the subset of the language you wish to today, while leaving the door open for extension later."} +{"doc_id": 559957, "author": "Jaime Hablutzel", "text": "Use Java keytool to convert from JKS to P12... Export from keytools proprietary format (called JKS) to standardized format PKCS #12: keytool -importkeystore \\ -srckeystore keystore.jks \\ -destkeystore keystore.p12 \\ -deststoretype PKCS12 \\ -srcalias \\ -deststorepass \\ -destkeypass ...then use openssl to export from P12 to PEM Export certificate using openssl: openssl pkcs12 -in keystore.p12 -nokeys -out cert.pem Export unencrypted private key: openssl pkcs12 -in keystore.p12 -nodes -nocerts -out key.pem"} +{"doc_id": 428887, "author": "Caleb", "text": "Double-check your motivation. If you think the code should be changed, you ought to be able to articulate some reason why you think it should be changed. And that reason should be more concrete than I would have done it differently or its ugly. 
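The keytool/openssl pipeline above scripts naturally. This sketch assumes keytool and openssl are on PATH and that keystore.jks exists (all file names are placeholders); the tools will prompt for the store passwords interactively:

```python
import subprocess

# Step 1: JKS -> PKCS#12 (keytool's proprietary format to the standard one).
subprocess.run([
    "keytool", "-importkeystore",
    "-srckeystore", "keystore.jks",
    "-destkeystore", "keystore.p12",
    "-deststoretype", "PKCS12",
], check=True)

# Step 2: certificate out of the P12 as PEM.
subprocess.run(["openssl", "pkcs12", "-in", "keystore.p12",
                "-nokeys", "-out", "cert.pem"], check=True)

# Step 3: unencrypted private key as PEM.
subprocess.run(["openssl", "pkcs12", "-in", "keystore.p12",
                "-nodes", "-nocerts", "-out", "key.pem"], check=True)
```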
If you cant point to some benefit that comes from your proposed change, then theres not much point in spending time (a.k.a. money) in changing it. Every line of code in the project is a line that has to be maintained. Code should be as long as it needs to be to get the job done and be easily understood, and no longer. If you can shorten the code without sacrificing clarity, thats good. If you can do it while increasing clarity, thats much better. Code is like concrete: its more difficult to change after its been sitting a while. Suggest your changes early if you can, so that the cost and risk of changes are both minimized. Every change costs money. Rewriting code that works and is unlikely to need to be changed could be wasted effort. Focus your attention on the sections that are more subject to change or that are most important to the project. Form follows function, and sometimes vice versa. If the code is messy, theres a stronger likelihood that it also contains bugs. Look for those bugs and criticize the flawed functionality rather than the aesthetic appeal of the code. Suggest improvements that make the code work better and make the operation of the code easier to verify. Differentiate between design and implementation. An important class with a crappy interface can spread through a project like cancer. It will not only diminish the quality of the rest of the project, but also increase the difficulty of repairing the damage. On the other hand, a class with a well-designed interface but a lousy implementation shouldnt be a big deal. You can always re-implement the class for better performance or reliability. Or, if it works correctly and is fast enough, you can leave it alone and feel secure in the knowledge that its cruft is well encapsulated. To summarize all the above points: Make sure that your proposed changes add value."} +{"doc_id": 428890, "author": "DVK", "text": "Code review serves 3 purposes: Checking for bugs Checking to see where the code could be improved Teaching tool for whoever wrote the code. Evaluating design/code quality are of course about #2 and #3. As far as #2: Make it VERY clear what the benefits are from proposed changes vs costs to fix. As any business decision, this should be about cost/benefit analysis. E.g. X approach to design would significantly reduce the likelyhood of bug Y from occuring when doing change Z, and we know this piece of code undergoes changes of type Z every 2 weeks. The cost of handling production outage from bug Y + finding the bug + fixing and releasing the fix + opportunity cost from not delivering the next set of featires is $A; whereas the cost of cleaning up the code now and opportunity cost (e.g. price of shipping late or with less features) is $B. Now, evaluate - or rather have your team leader/manager - evaluate $A vs $B and decide. This will help the smart team leads to effectively manage this. E.g. they will make a rational decision using FULL information This will (especially if you word this well) raise YOUR status - e.g. you are someone intelligent enough to see the benefits of better design, AND smart enough not to religiously demand it without weighing business considerations. AND, in the likely event that bug Z happens, you gain all that much more leverage on the next set of suggestions. 
As far as #3: VERY clearly delineate must fix bugs/issues from This is a best practice and really ought to be fixed IF we can spare resources - see attached pro/con analysis design improvements (attache the stuff described for #2 above) vs These are general guidelines that I think would help you improve your code robustness so you can more easily maintain the code optional changes. Please note the wording - its not about making your code like what I want - its if you do this, YOU gain benefits a, b, c. The tone and approach matters."} +{"doc_id": 428891, "author": "Kramii", "text": "There is a sweet-spot for adding value through refactoring. Changes need to accomplish three things: improve code that is likely to change increase clarity cost least effort Considerations: We know that clean code is less expensive to write and maintain, and is more fun to work on. Your job is to sell that idea to people in your company. Think like a salesman, not like an arrogant grouch (ie. not like me). You cant win, you can only loose less. Focus on adding real value - not just beauty. I like my code to look nice, but sometimes have to accept that inexpensive matters more. A good way to find the sweet spot is to follow the Boy Scout Principle - when you work on an area of code, always leave it in better shape than you found it. A small improvement is better than no improvement. Make good use of automated tools. For example, tools that just clean up a bit of formatting can make a world of difference. Sell other ideas that incidentally improve code clarity. For example, unit testing encourages decomposing big methods into smaller ones."} +{"doc_id": 133981, "author": "GIWonder73", "text": "For other people looking for this answer who cant use realmBs solution because they cant access the internet on their phone. I just changed the file extension of my certificates from .cer to .crt and everything worked fine! Thanks to the users of this xda thread for the solution."} +{"doc_id": 461662, "author": "Erik Reppen", "text": "Some Tests/Indicators: Turn off the IDE. Can you still read your own code? When theres a bug is it fairly easy to trace through it by hand and figure out what class youll need a breakpoint in to figure out thats where the problem is? Or when you do use the IDE do you just not even bother and just step through from the very beginning? Does debug often becomes a game of wack-a-mole where fixing one bug creates 2+ more. From trigger pull to something useful actually happening, how many method calls does it take? How many methods pass the exact same or most of the same exact parameters on to another method call? How many files do you have to open to just add a simple new method to a class? Think on patterns and practices youve adopted. Did you do it because they made perfect sense or because somebody convinced you that its the only way to do it? or because you wanted it on your resume or because some rockstar dev said so."} +{"doc_id": 428903, "author": "deworde", "text": "In the event of cripplingly bad design, your focus should be on maximising the encapsulation. That way, it becomes easier to replace the individual classes/files/subroutines with better designed classes. Focus on ensuring that the public interfaces of the components are well designed, and that the internal workings are carefully concealed. Also, Data Storage wrappers are essential. (Large amounts of stored data can be very hard to change, so if you get implementation bleed into other areas of the system, youre in trouble). 
Once youve got the barriers between the components up, focus on the components most likely to cause major issues. Repeat until deadline or until system is perfect."} +{"doc_id": 559979, "author": "Brian Sparks", "text": "Not very familiar with how a server would be able to hide a global pepper constant but my take is that sooner or later a hacker that has penetrated the server will figure out how to capture the pepper value. To make a pepper value totally secure would require special hardware. One way to do this would be to use a FPGA board installed in the server. The FPGA would contain the code used to perform the hash including the pepper value and all hash calculations occur inside the FPGA. With the FPGA, the programming can be a one way function. The pepper can be programmed in but there is no instruction that can be sent to read it back out. The pepper would be stored on a piece of paper locked in a safe. If the pepper is 128+ bits generated randomly, there would be no practical way of determining it. Not sure how practical this would be as it would increase server hardware cost."} +{"doc_id": 396140, "author": "dsimcha", "text": "I use Python somewhat regularly, and overall I consider it to be a very good language. Nonetheless, no language is perfect. Here are the drawbacks in order of importance to me personally: Its slow. I mean really, really slow. A lot of times this doesnt matter, but it definitely means youll need another language for those performance-critical bits. Nested functions kind of suck in that you cant modify variables in the outer scope. Edit: I still use Python 2 due to library support, and this design flaw irritates the heck out of me, but apparently its fixed in Python 3 due to the nonlocal statement. Cant wait for the libs I use to be ported so this flaw can be sent to the ash heap of history for good. Its missing a few features that can be useful to library/generic code and IMHO are simplicity taken to unhealthy extremes. The most important ones I can think of are user-defined value types (Im guessing these can be created with metaclass magic, but Ive never tried), and ref function parameter. Its far from the metal. Need to write threading primitives or kernel code or something? Good luck. While I dont mind the lack of ability to catch semantic errors upfront as a tradeoff for the dynamism that Python offers, I wish there were a way to catch syntactic errors and silly things like mistyping variable names without having to actually run the code. The documentation isnt as good as languages like PHP and Java that have strong corporate backings."} +{"doc_id": 133995, "author": "eldarerathis", "text": "Each version of Android since 1.5 has been developed with a specific codename. These codenames are chosen alphabetically and have thus far all been dessert items (or, generically, sweet/sugary foods). Some codenames are associated with more than one version number, while others are limited to only a specific one, and the reason for this inconsistency is not currently known. The naming typically appears to correspond to changes in the developer API levels, but this is not always true (example: 3.0 and 3.1 are both Honeycomb but they have different API levels). The following names are used for the currently existing Android releases. Note that versions 1.0 and 1.1 were not publicly named. 
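The nested-function limitation called out in the Python critique above is what Python 3s nonlocal statement addresses; a minimal demonstration:

```python
def make_counter():
    count = 0
    def bump():
        # Without this declaration, `count += 1` would raise
        # UnboundLocalError -- the Python 2 limitation described above.
        nonlocal count
        count += 1
        return count
    return bump

counter = make_counter()
print(counter(), counter(), counter())   # 1 2 3
```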
However, Android 1.1 was internally referred to as Petit-Four (noted in Traroths answer, confirmed here): Cupcake: Android 1.5 Donut: Android 1.6 Eclair: Android 2.0 Android 2.1 Froyo: (short for frozen yogurt) Android 2.2 Gingerbread: Android 2.3 Honeycomb: Android 3.0 Android 3.1 Android 3.2 Ice Cream Sandwich: Android 4.0 Jelly Bean: Android 4.1 Android 4.2 Android 4.3 KitKat: Android 4.4 Lollipop: Android 5.0 Android 5.1 Marshmallow: Android 6.0 Nougat: (official name: https://twitter.com/Android/status/748642375908589568) Android 7.0 Android 7.1 Oreo: Android 8.0 Android 8.1 Pie: Android 9.0 Android 10 (no codename) Android 11 (no codename)"} +{"doc_id": 133997, "author": "Alexis Dufrenoy", "text": "Eldarerathis summarized it very well. To add some things: The 1.1 version was internally called Petit Four by Google, and thats how it all began. Google is installing a giant pastry on their lawn at Mountain View each time a new version is about to be launched. You can see pictures of that display at different stages here: Donut, Android logo, Nexus one, Cupcake, Eclair Froyo Gingerbread, Icecream Sandwich, Honeycomb KitKat, Jellybean Lollipop Marshmallow Nougat Oreo"} +{"doc_id": 396144, "author": "cmcginty", "text": "My main complaint is threading, which is not as performant in many circumstances (compared to Java, C and others) due to the global interpreter lock (see Inside the Python GIL (PDF link) talk) However there is a multiprocess interface that is very easy to use, however it is going to be heavier on memory usage for the same number of processes vs. threads, or difficult if you have a lot of shared data. The benefit however, is that once you have a program working on with multiple processes, it can scale across multiple machines, something a threaded program cant do. I really disagree on the critique of the documentation, I think it is excellent and better than most if not all major languages out there. Also you can catch many of the runtime bugs running pylint."} +{"doc_id": 166773, "author": "rossmcm", "text": "As of September 2019, Dropbox for Android 156.2.2 the following applies on my Huawei Nova 3i phone. * This applies to files where Make available offline has been selected * If the file hasnt been made available offline I dont believe there is a persistent copy of it on the device, and the file contents are probably streamed when the file is accessed. If the file is located at \\Some folder\\Somefile.txt then when Make available offline is checked, a persistent copy of the file is placed at: Main storage/Android/data/com.dropbox.android/files/u123456/scratch/Some folder/Somefile.txt For other devices Main storage will be different. u123456 (obfuscated) probably identifies the Dropbox account. A quick test seems to indicate that there is no connection in the opposite direction -i.e. I created a folder and file using an android file manager at: Main storage/Android/data/com.dropbox.android/files/u123456/scratch/test/temp.txt but the file/folder doesnt appear in the dropbox image."} +{"doc_id": 396152, "author": "mipadi", "text": "I think the object-oriented parts of Python feel kind of bolted on. The whole need to explicitly pass self to every method is a symptom that its OOP component wasnt expressly planned, you could say; it also shows Pythons sometimes warty scoping rules that were criticized in another answer. Edit: When I say Pythons object-oriented parts feel bolted on, I mean that at times, the OOP side feels rather inconsistent. 
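The GIL behaviour described in the threading answer above is easy to observe. This sketch contrasts threads with a process pool for CPU-bound work; exact timings will vary by machine and Python version:

```python
import time
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor

def busy(n):
    while n:            # pure-Python CPU work, so the GIL serialises it
        n -= 1

def timed(executor_cls):
    start = time.perf_counter()
    with executor_cls(max_workers=4) as ex:
        list(ex.map(busy, [2_000_000] * 4))
    return time.perf_counter() - start

if __name__ == "__main__":
    print("threads:  ", timed(ThreadPoolExecutor))   # ~serial under the GIL
    print("processes:", timed(ProcessPoolExecutor))  # scales across cores
```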
Take Ruby, for example: In Ruby, everything is an object, and you call a method using the familiar obj.method syntax (with the exception of overloaded operators, of course); in Python, everything is an object, too, but some methods you call as a function; i.e., you overload __len__ to return a length, but call it using len(obj) instead of the more familiar (and consistent) obj.length common in other languages. I know there are reasons behind this design decision, but I dont like them. Plus, Pythons OOP model lacks any sort of data protection, i.e., there arent private, protected, and public members; you can mimic them using _ and __ in front of methods, but its kind of ugly. Similarly, Python doesnt quite get the message-passing aspect of OOP right, either."} +{"doc_id": 428921, "author": "Trevor Boyd Smith", "text": "Your question is How to code review badly designed code?: The answer IMO is simple. Talk about the DESIGN of the code and how the design is flawed or doesnt meet requirements. If you point out a flawed design or doesnt meet requirement then the developer will be forced to change his code because it doesnt do what it needs to do. If the code is functionally sufficient and/or meets spec and/or meets requirements: If you are a peer to this developer, you do not have any direct power that would let you tell him to make changes. There are a couple of options left to you: You must use your own personal influence (a form of power that is indirect) and/or your ability to be persuasive get involved with your organizations code process group and start making code maintenance a higher priority. Bite the bullet and learn how to read crappy code faster/more-fluently so you dont get hung up (it sounds like you keep getting hung up or slowed down when you encounter crappy code) on crappy code. This will also make you a stronger programmer. And it will let you correct the crappy code when you are working on crappy code. And this will also let you work on more projects because many projects have crappy code that is functional... but lots of crappy code. Lead by example. Make your code better... but dont try to be a perfectionist. Because then you will become the slow guy who cant meet deadlines, is always criticizing, and thinks he is better than everyone else. I find there is no silver bullet. You have to use all three and you have to be creative in your usage of all three."} +{"doc_id": 101251, "author": "David Hollands", "text": "In Preview, yes...! I just discovered. Hold the option button down, and use 2 fingers on the trackpad. So many hidden features."} +{"doc_id": 101261, "author": "Unhelpful helper", "text": "It really depends...none of the answers above worked for me, until I disabled each extension one by one and found that it was the Adblocker. Thank goodness Chrome makes it easy to switch these on and off! As soon as I launched Chrome, CPU would jump to 240%, before opening up any sites. Weird thing is that Ive had this Adblocker extension installed for at least two years, and it just started happening this past week."} +{"doc_id": 428947, "author": "Morgan Herlocker", "text": "I sometimes find myself in a situation where a peers code is riddled with messy design and very little concern for future maintenance, though its functional and contains little to no bugs. This code is done. At a certain point, redesigns become too costly to justify. If the code is already functional with little to no bugs, then this will be an impossible sell. 
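The `len(obj)` versus `obj.length` point and the `_`/`__` pseudo-privacy mentioned above are both easy to observe directly. A small sketch, with the class and attribute names invented for illustration:

```python
class Playlist:
    def __init__(self, tracks):
        self._tracks = list(tracks)  # single underscore: convention, not enforcement
        self.__rating = 5            # double underscore: name-mangled, not private

    def __len__(self):               # the hook the builtin len() dispatches to
        return len(self._tracks)

p = Playlist(["a", "b", "c"])
print(len(p))               # 3 -- len() delegates to p.__len__()
print(p.__len__())          # 3 -- the "method spelling" works as well
print(p._tracks)            # nothing actually blocks outside access
print(p._Playlist__rating)  # 5 -- mangling is just renaming, not protection
```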
Suggest a few ways to clean this up in the future and move on. If/when the code breaks in the future, reassess the value of a redesign then. It may never break, which would be great. Either way, you are at the point where it makes sense to gamble that it will not break, since the cost will be the same now or later: drawn-out, terrible redesign. What you need to do in the future is have tighter development iterations. If you had been able to review this code before all the work of ironing out bugs had been invested, it would have made sense to suggest a redesign. Towards the end, it never makes sense to do major refactoring unless the code is written in a fundamentally unmaintainable way and you know for certain that the code will need to be changed soon after release. Given the choice between the two options (refactor or no refactor), think about which sounds like the smarter sell: Hey boss, we were on schedule and had everything working, but now we are going to rebuild a lot of stuff so that we can add feature X in the future. or Hey boss, we are ready to release. If we ever have to add on feature X, it might take us a couple extra days. If you said either one, your boss would likely say: Who said anything about feature X? The bottom line is that sometimes a bit of technical debt makes sense, if you were not able to correct certain flaws back when it was cheap (early iterations). Having quality code design has diminishing returns as you get closer to a completed feature and the deadline."} +{"doc_id": 428951, "author": "Ed Staub", "text": "When a spoonful of sugar helps the medicine go down, and whats wrong can be expressed succinctly - there arent 20 things wrong - Ill lead in with a form that suggests that I have no stake, no ego invested in what I want to be heard. Usually its something like: I wonder if it would be better to... or Does it make any sense to... If the reasons are fairly obvious, I dont state them. This gives other people a chance to assume some intellectual ownership of the suggestion, as in: Yes, thats a good idea, because < your obvious reason here >. If the improvement is fairly obvious, but not so much as to make me look an idiot for not thinking of it, and the reason to do it reflects a value shared with the listener, then sometime I dont even suggest it, instead: I wonder if theres a way to... < shared value statement here > This is only for dealing with really touchy people - with most of my peers, I just let em have it!"} +{"doc_id": 265117, "author": "Albert T. Wong", "text": "No. Although its close. There are some key differences. CentOS lacks certified cryptographic protection required on government networks. CVEs (Common Vulnerabilities and Exposures) are not tested on CentOS, and it is expensive to test them properly."} +{"doc_id": 330659, "author": "user308879", "text": "Similarly to other answers posted before, one can transfer a running process to use screen retrospectively thanks to reptyr and then close the terminal. The steps are described in this post. The steps to take are: Suspend the process Resume the process in the background Disown the process Launch a screen session Find the PID of the process Use reptyr to take over the process"} +{"doc_id": 101299, "author": "Pip", "text": "I updated an app called Karabiner and it turned some Mission Control shortcuts off. I like moving desktops. 
Here's how you turn them back on:"} +{"doc_id": 166844, "author": "Irfan Latif", "text": "In any terminal emulator app: ~$ ip -o a Applicable both to WiFi and Mobile Data. Doesn't require root."} +{"doc_id": 166845, "author": "Irfan Latif", "text": "Hostname is used to easily identify and remember hosts connected to a network. It's set on boot, e.g. from /etc/hostname on Linux-based systems. Hostname is also a part of DHCPREQUEST (standardized as code 12 by IETF) which a DHCP client (an Android device in our case) makes to the DHCP server (WiFi router) to get an IP address assigned. The DHCP server stores the hostnames to offer services like DNS. See details in How to ping a local network host by hostname?. Android - instead of using the Linux kernel's hostname service - used the property net.hostname (since Android 2.2) to set a unique host name for every device, which was based on android_id. This hostname property was used for the DHCP handshake (as added in Android 2.2 and 4.0). In Android 6 net.hostname continued to be used (1, 2, 3, 4) in the new Java DHCP client when native dhcpcd was abandoned, and the service was later removed in Android 7. Since Android 8 - when android_id became unique to apps - net.hostname is no longer set, so a null is sent in DHCPREQUEST. See Android 8 Privacy Changes and Security Enhancements: net.hostname is now empty and the dhcp client no longer sends a hostname. So WiFi routers show no host names for Android 8+, nor can we set / unset / change it. However, on rooted devices you can set net.hostname manually using the setprop command, or add it to some init .rc file to set it on every boot. Or use a third-party client like busybox udhcpc to send the desired hostname and other options to the router. See Connecting to WiFi via ADB Shell."} +{"doc_id": 166854, "author": "H. Hess", "text": "If you have Magisk installed you can place the .sh in: /sbin/.magisk/img/.core/service.d/ or in /sbin/.magisk/img/.core/post-fs-data.d/ Don't forget to make it executable: chmod +x your-script.sh. More info: https://github.com/topjohnwu/Magisk/blob/master/docs/guides.md#boot-scripts"} +{"doc_id": 166856, "author": "Irfan Latif", "text": "Things were simple before Android 5, when SELinux wasn't enforcing. You could put your code in any script, or replace a binary with a script, which was executed with root privileges on boot. Another method was to define a custom init service specifically to batch-execute scripts from some directory. Based on these approaches custom ROM developers introduced different pseudo-init.d mechanisms like /etc/init.d/, /etc/install-recovery.sh, /etc/init.qcom.post_boot.sh, /system/bin/debuggerd, /data/init.sh, /data/local/userinit.sh, /data/local/init.d/ etc. However, a process running with UID 0 but in a restricted SELinux context is quite helpless. A service started in the init.rc file with the u:r:init:s0 context can't even execute a shell script from /system/bin/, so the SELinux policy needs to be patched to inject an unrestricted context, e.g. Magisk defines u:r:magisk:s0. After that it's possible to run a script directly as an init service or from an init.d-like directory. For details see How to run an executable on boot and keep it running?"} +{"doc_id": 68560, "author": "maxim", "text": "This is a partial, temporary solution. Basically, if the frequencies are on different channel numbers then it is possible to set the particular WiFi band (worked on OS X Mavericks). High-level overview: Find the channel numbers of 5 GHz and 2.4 GHz. Are they different? If yes, proceed to step 2. 
Set 5 GHz channel number using airport command. Detailed instructions: Open Wireless Diagnostics.app (it is located in /System/Library/CoreServices/Applications). In the application menu select Window->Utilities (or press cmd+2).Utilities window should appear. Select WiFi Scan from Utilities window and press Scan Now button. This will show you all available networks, info about them and your active connection. Now, verify that BSSIDs of 2.4 and 5 Ghz with the same network name are on different channel numbers. If so, then most likely you can change the band by setting the channel number of the desired frequency band. Alternatively for steps 1-4, just type in the terminal sudo /System/Library/PrivateFrameworks/Apple80211.framework/Versions/Current/Resources/airport /usr/sbin/airport -s Turn Wifi off on your computer Open the Terminal.app and type: sudo /System/Library/PrivateFrameworks/Apple80211.framework/Resources/airport --channel=**num** where **num** is the channel number to set. This command will only work under administrator account (so make sure that you are log as an Administrator i.e. su YourAdministratorAccount) Turn Wifi On and connect to the network again You can check your active connection by Alt+Clicking on the WiFi icon on the OSX menu bar, re-scanning with Wireless Diagnostics or using the terminal :-). Thats all!"} +{"doc_id": 65731, "author": "David Mulder", "text": "Honestly, I agree that there are a lot of risks associated with using the root user as default. But let me just run through them and criticize some of the arguments a bit Defending against applications: Practically the permission system of *nix is not strong enough (by far) to allow running arbitrary programs. A malicious program on *nix is able to do enough evil stuff (like stealing your bank credentials) without root permissions. It will be somewhat harder for a non-root application than for a root application (e.g. instead of directly installing a root-certificate and intercepting the connection to the bank you will need to mess around with the browser instead, but hey, thats actually quite doable and you likely had to do that anyways to make sure the user doesnt notice anything) Defending against user mistakes (like running a wrong command and deleting all system files): Absolutely true, but even though a non-root user will save the system, all the important files will normally be lost already (as the user owned files are far more likely to be unique). Defending against exploitable bugs in applications you run: Now this is more like it. E.g. when you run a web server where a lot of applications are open to the outside and thus any exploitable bugs will be easily reached. The same still applies of course even if you are sitting behind a router and firewall, though the extent of the danger is far less significant. Once again however the question becomes how much the permission system will realistically defend on a private system. Without root permissions all private files can still be accessed and intercepting network data is also possible... the two most important things you can wish for as an attacker of a private system. (Now, on top of the standard *nix file permission system Apple has also introduced an application sandboxing system. As far as I know that one is still fully functional even when logged in as root. If however it werent then that would be a total deal breaker.) Either way, all considered I do not think its as terrible an idea as some others claim. 
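The `airport` utility quoted in those instructions can also be driven from a script. The sketch below is hedged: the binary path is the one given in the answer above, but whether it exists (or still supports `-s`) depends on the macOS release, so the code probes for it rather than assuming it works.

```python
import subprocess

# Path quoted from the answer above; on recent macOS releases the binary may be
# missing or limited, so treat its presence and flags as an assumption.
AIRPORT = ("/System/Library/PrivateFrameworks/Apple80211.framework"
           "/Versions/Current/Resources/airport")

def scan_networks() -> str:
    """Return the raw `airport -s` scan table (SSID, BSSID, channel, ...)."""
    done = subprocess.run([AIRPORT, "-s"], capture_output=True, text=True, check=True)
    return done.stdout

if __name__ == "__main__":
    try:
        print(scan_networks())
    except (FileNotFoundError, subprocess.CalledProcessError) as exc:
        print(f"airport scan unavailable: {exc}")
```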
Mind you, I am not saying its a good idea either, but I think that people overestimate the usefulness of the *nix file system permission model in protecting you. Yes, its incredibly useful for certain things (e.g. multi user systems, complex multi-application servers, keeping the system running no matter what happens (running, but not necessary usable), locking important files away (though youre better off encrypting those...), etc.), but its not some magical protection that prevents bad stuff from happening. In the comments I came up with an analogy which seems quite adequate in describing the situation. In Dutch we have a word for the little closet where you can find all the meters and the toggle for the main water supply, etc. Running as the root user account is like taking the lock off that little closet. Ironic fact: Most people dont have locks on it in the first place. Now, just like with the root user thats not to say that its not useful to lock it away in certain cases, for example in offices or other semi-public buildings its often locked away, but in normal houses its far more important to have a strong lock on the front door (not installing random things, firewall, etc.) and putting all your important stuff in a safe (making backups, encrypting stuff, etc.). Will an extra lock on that closet hurt? Nope, so it might be a good idea to have it in place just in case, but in all likelihood its going to be quite useless. Running as the root user is nothing like taking all the locks of your house and carrying all the stuff in the safe with you all the time as is claimed by David Richerby. Your passport (identity) is in no way protected by the *nix file system, your money (bank account) is in no way protected by the *nix file system, your important passwords are likely not protected by the *nix file system (if youre using Safari however they might actually protected partially by the *nix file system permission model, but without root you can still add an invisible extension to Safari and next just intercept the password the next time you use it), your photos are definitely not protected by the *nix file system and if youre using the terminal youre already carrying a chainsaw around with you (per point 2 above)."} +{"doc_id": 625624, "author": "Ronnie Royston", "text": "It represents the subnet (subnetwork) mask. 1) Re-write the IP address as computers read it. 192.168.0.0 converts to binary/base2/bits as 11000000.10101000.00000000.00000000 2) The number of bits (from left to right) that represent the subnet address is specified as 16 (/16). 16 bits, or 11000000.10101000 (192.168) is the network address and 0.0, or 00000000.00000000, is the host address - in this case a broadcast address. Its referred to a mask because you are telling the computer to ignore that number of bits when calculating the host address, or perhaps that the network wears those number of bits as its identifier, or mask."} +{"doc_id": 396255, "author": "Joanis", "text": "They should not; first method for me. When I look at the second one, because of the unused lines (those only having braces on it, other than the very last closing brace), it feels like it breaks the continuity of the code. I cant read it as fast because I need to take special attention to empty lines which usually mean a separation in code purpose or something like this, but in no case this line belongs to a curly brace (which only repeats the meaning of indentation). Anyway, just like when you write text... 
adding an indentation at the beginning of a paragraph is superfluous if there is a blank line before it (double sign of paragraph change), there is no need to waste lines for braces when we are properly indenting. Plus, as already stated, it allows to fit more code in the screen, which otherwise is a little bit counterproductive."} +{"doc_id": 396257, "author": "Vatine", "text": "Regular expressions are more suitable for tokenisation than for full-scale parsing. But, a surprisingly large set of things that programmers need to parse are parseable by a regular language (or, worse, almost parseable by a regular language and if you only write a little more code...). So if one is habituated to aha, I need to pick text apart, Ill use a regular expression, its easy to go down that route, when you need something thats closer to a push-down automaton, a CFG parser or even more powerful grammars. That usually ends in tears. So, I think the quote isnt so much slamming regexps, they have their use (and well-used, theyre very useful indeed), but the over-reliance on regexps (or, specifically, the uncritical choice of them)."} +{"doc_id": 68577, "author": "bmike", "text": "Since you have Filevault - that makes your situation precarious and a bit delicate. Some good news, Apple has disabled emoji entry in the password pane for 10.11 El Capitan - I cant paste or get emoji in the Users & Groups preference pane. Apples official manner to get past this is to click the ? in the password field and reset your password. If you used a recovery key - you can enter it without needing emoji keys. If you used AppleID, you can enter that without needing emoji. Barring that, If you have a current backup (or can let things complete if you are logged in), the fastest way froward is to wipe the Mac and then restore the backup. You can choose a new password for a new admin account and then import the user files. If you have ssh enabled, or remote desktop enabled, and the Mac is still logged in to your account, you could log in to the Mac from another Mac over the network. This will not work if you have rebooted the Mac and Recovery HD boot is waiting at the Filevault unlock screen. If you have a second admin account that is FileVault enabled, you could reboot and use that password to unlock the disk and then reset the password on your main account. Lastly, you could boot to recovery HD or internet recovery and reinstall the OS to a USB drive. Then you can mount the file vault drive and have emoji to unlock the volume and make a new admin account through various tricks / procedures similar to using single user mode to remove the .AppleSetupDone file."} +{"doc_id": 232419, "author": "Celada", "text": "Sure! Since youre used to vi keybindings, why not configure your shell to respond to them? For bash, put this in your ~/.inputrc: set editing-mode vi Running instances of bash will not re-read the file, so log out and back in. zsh will even detect this for you: if none of your startup scripts force the editing mode one way or the other using bindkey and if your $EDITOR environment variable is detected to contain vi, then it will automatically enable vi keybindings. If you need to force it, put this in your ~/.zshrc: bindkey -v Thereafter, use ESC as usual to enter command line and k and j to move up and down. 
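The same `set editing-mode vi` directive shown above is understood by anything built on GNU readline, including Python itself, which makes for a quick way to try vi-style line editing. A sketch, assuming a GNU-readline build of Python (the libedit builds shipped with some macOS Pythons use a different bind syntax):

```python
import readline

# Programmatic version of the ~/.inputrc line from the answer above. After this,
# input() accepts Esc, then k/j to walk history, plus the usual vi motions.
readline.parse_and_bind("set editing-mode vi")

while True:
    line = input("vi-mode> ")
    if line in ("quit", "exit"):
        break
    print(f"you typed: {line!r}")
```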
ALSO: The default shell bindings in most shells are the emacs bindings, so actually Crtl-P and Ctrl-N should already work without you having to change anything."} +{"doc_id": 68580, "author": "David Moles", "text": "In my case, after trying the above on my work machine without success, I found that the culprit was Active Directory. The fix was to go into Directory Utility and edit the AD service settings (double-click on Active Directory) to enable Create mobile account at login: This apparently causes the AD credentials to be cached locally, so the system no longer has to go out to the server every time it tries to validate your password. You can get to Directory Utility with Spotlight or via the Login Options section of System Preferences / Users & Groups (select the Edit\u2026 button next to Network Account Server):"} +{"doc_id": 232421, "author": "Sparhawk", "text": "With csh or any shell implementing csh-like history substitution (tcsh, bash, zsh): !! Then Enter. Or alternatively: !-1 Then Enter. Or Ctrl+P, Enter Magic space Also, note that !! and !-1 will not auto-expand for you, until you execute them (when it might be too late). If using bash, you can put bind Space:magic-space into ~/.bashrc, then pressing Space after the command will auto-expand them inline, allowing you to inspect them before execution. This is particularly useful for history expansion from a command run a while ago, e.g. !echo will pull the last command run starting with echo. With magic space, you get to preview the command before its run. Thats the equivalent of doing bindkey magic-space in tcsh or zsh."} +{"doc_id": 68582, "author": "Zenexer", "text": "Certain applications appear to be triggering this problem when interacting with the microphone. The problem goes away a minute or two after the problem applications are closed. Problem applications These applications need to be restarted in order to restore normal CPU usage. They do not release their hold on the microphone properly. HipChat after first time camera/mic are used, such as when opening preferences (see HipChat forums) Boom by Global Delight Technologies (reference)--as far as I can tell, this shouldnt be using the microphone Background applications These applications could be running in the background while using your microphone. Any VoIP application such as Skype or FaceTime Adobe Flash in a web browser Virtualization software such as VMware, VirtualBox, or Parallels. These will likely listen to your microphone whenever you have a virtual machine running, even if that virtual machine doesnt seem to be using the microphone at the moment. Most virtualization software allows you to disable audio hardware virtualization, which should solve this, although some may not be granular enough to disable the microphone without also disabling the speakers. Specialized audio software such as that developed by Akai Pro (example report for EIE Pro with Logic X) Voice search/recognition software, including Google Chromes Ok Google voice search capability (reported as partially fixed by Chrome developers: Chrome will stop listening when switching users)"} +{"doc_id": 134119, "author": "Leif Andersen", "text": "You cant remove them without root, theyre installed to a directory that cannot be accessed without root. However, you can do the next best thing, close your eyes, stick your fingers in your ears, and go la-la-la-la, pretending they dont exist. The way to do this is to get a launcher (such as ADW if memory serves), that allows you to hide icons. 
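To make the `!!` / `!-1` / `!echo` semantics described above concrete, here is a toy model of csh-style history expansion. It is purely illustrative, not how bash implements it, and it skips the magic-space preview behavior:

```python
def expand(event: str, history: list[str]) -> str:
    """Resolve a csh-style history event against a list of past commands."""
    if event == "!!":            # the previous command
        return history[-1]
    if event.startswith("!-"):   # the n-th previous command, e.g. !-1
        return history[-int(event[2:])]
    if event.startswith("!"):    # the last command starting with a prefix
        prefix = event[1:]
        for cmd in reversed(history):
            if cmd.startswith(prefix):
                return cmd
        raise LookupError(f"{event}: event not found")
    return event

hist = ["tail file.txt", "less otherfile.txt"]
assert expand("!!", hist) == "less otherfile.txt"
assert expand("!-1", hist) == "less otherfile.txt"
assert expand("!ta", hist) == "tail file.txt"
```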
In that case, they will still be installed (and if something triggers them, theyll pop up), but at least you wont have to look at them any more."} +{"doc_id": 166888, "author": "digna", "text": "After my small investigation I can say that this notification appears when one of your mutual subscribers creates another Instagram account using same phone number. Instagram does not allow to create multiple accounts using same email address, but it does allow it using same phone number. So, when you have no permissions granted to Instagram and no Facebook connected, it appears that user bob just created another account under the name bob123."} +{"doc_id": 232422, "author": "daisy", "text": "My favorite one is CTRL + P then CTRL + O This works by default, no extra configuration needed. ^P will let you switch to the last command and ^O will let you execute current line Note that CTRL + O can be used for as many times as you want"} +{"doc_id": 35818, "author": "YaOzI", "text": "MacPorts It is more independent of Mac OS X, this means MacPorts will just ignore many of the system libraries and softwares that already available in Mac OS X and pull its own one instead, which could be slower when the utility you install requires some set of large libraries and softwares. But this kind of choice is safer because the packages you installed are less influenced by Apples system update/upgrade procedure. Homebrew It is more dependent on existing Mac OS X installed packages, so this will speed up the installation of packages and minimize redundant libraries. But the risk is installed packages might be broken because of Apples system update/upgrade. So, these are the two different kind of tradeoff. Also, Homebrew takes over /usr/local by default, with which some folks dont like this because it somehow conflict with the unix-tradition and might cause problems if you\u2019ve already installed anything there (MySQL, etc.) Apart from these differences, considering the packages these two can offer, you can check with these two commands if you already have MacPorts/Homebrew installed, which show you the packages they currently provided: port list | wc -l brew search | wc -l And you will find out that MacPorts has many more packages than Homebrew. (19399 v.s 3583 on May 13 2016)"} +{"doc_id": 101359, "author": "boris42", "text": "There are several ways to regain/recreate administrator privileges on a Mac. The administrator account exists, but the password is forgotten: Boot into Recovery Partition, choose Terminal from the Utilities menu and type following: resetpassword This will launch the Reset Password app with which you can select an account and set a new password for it, thus enabling login for it. Resetting the password does NOT change the password of the accounts keychain file, so accessing data in the keychain still requires the original password. The administrator account does not exist and/or we want to create a new admin with the Setup Assistant The goal is to remove a flag file /var/db/.AppleSetupDone which tells macOS that the Setup Assistant has already completed. If the file is missing macOS will launch the Setup Assistant which includes the creation of a new account with administrative privileges (same as on first boot of a new Mac). Note that this may result in the new account being logging in automatically when the system is restarted, replacing whatever account may have been set to automatically log in previously. 
If you need to retain access to the original account without knowing its password, this may mess that up. You can make Setup Assistant run in (at least) three ways: a) use Terminal in Recovery. First you boot in Recovery Partition (CmdR at boot) and select Disk utility from the Utilities window. Select your system volume (usually named Macintosh HD) and click Mount button on the toolbar. Now the volume is read/write. Close Disk Utility, launch Terminal from the menu and type following command: rm /var/db/.AppleSetupDone Press Enter, quit Terminal and restart your Mac. When the system boots, Setup Assistant will be shown and you will be prompted to create a new administrative account. b) use Single User Mode. First you boot in Single User Mode (CmdS at boot). When the system boots up and prompt #root is displayed type following commands: /sbin/mount -uw / rm /var/db/.AppleSetupDone exit When the system boots, Setup Assistant will be shown and you will be prompted to create a new administrative account. c) use Target Disk mode with another computer If you have another Mac available, you can use Target Disk Mode (T at boot) and connect FireWire, Thunderbolt or USB-C cable between the Macs. On the other Mac you will see a yellow icon representing an external disk (but is actually the internal disk of your Mac in Target Disk Mode) which you can access with full read/write capabilities. Note the yellow volume name (usually Macintosh HD) and type following in Terminal (with appropriate volume name entered): rm /Volumes/Macintosh\\ HD/var/db/.AppleSetupDone Eject the yellow volume and use power button to shutdown and restart your Mac. When the system boots, Setup Assistant will be shown and you will be prompted to create a new administrative account. use Single User Mode and use command line tools to directly create a new user and make it a member of administrator group: First you boot in Single User Mode (CmdS at boot). When the system boots up and prompt #root is displayed type following commands: /sbin/mount -uw / launchctl load /System/Library/LaunchDaemons/com.apple.opendirectoryd.plist dscl . -create /Users/joeadmin dscl . -create /Users/joeadmin UserShell /bin/bash dscl . -create /Users/joeadmin RealName Joe Admin dscl . -create /Users/joeadmin UniqueID 510 dscl . -create /Users/joeadmin PrimaryGroupID 20 dscl . -create /Users/joeadmin NFSHomeDirectory /Users/joeadmin dscl . -passwd /Users/joeadmin password dscl . -append /Groups/admin GroupMembership joeadmin dseditgroup -o edit -a joeadmin -t user admin exit This will create an account joeadmin, account ID 510, with password password which will be an administrator."} +{"doc_id": 68592, "author": "Nate Black", "text": "I used bittorrent sync to sync 8TB between my Drobo and QNAP."} +{"doc_id": 265202, "author": "Catskul", "text": "zlib-flate -uncompress < IN_FILE > OUT_FILE I tried this and it worked for me. zlib-flate can be found in package qpdf (in Debian Squeeze, Fedora 23, and brew on MacOS according to comments in other answers) (Thanks to user @tino who provided this as a comment below the OpenSSL answer. 
Made into a proper answer for easy access.)"} +{"doc_id": 232436, "author": "fedorqui", "text": "With any POSIX shell implementing the User Portability option (includes ksh, zsh, bash, yash), you can also use the fc command: fc -e : -1 See an example: $ echo hello hello $ fc -e : -1 echo hello hello More info in Execute a range of commands from history's answer by Jonathan Leffler."} +{"doc_id": 68612, "author": "voices", "text": "Even if you're locked out, you should still have access to the Accessibility Settings part of your System Preferences, in which case you can proceed to Keyboard Preferences and adjust your peripheral input sources. This means you can add/enable Unicode Hex Input. Plus, checking \u2611\ufe0e Show Input menu in menu bar will allow you to \u2328 Show Character Viewer and inject a variety of Unicode & Emoji characters."} +{"doc_id": 166919, "author": "NocTurn", "text": "Once you have Android Studio set up, make sure you can connect to an emulator or a device where it will be listed in the AVD (Android Virtual Devices). If a physical device is connected, confirm that debugging mode is enabled and access is allowed to Android Studio. A separate ADB is not needed as all the build tools are part of the IDE. Now you are ready to access your device's shell! Access the terminal at the bottom of the IDE by selecting the Terminal button. In the terminal issue adb devices. This will list all the devices currently connected to Android Studio. Find and use your device's name for step 3. Now issue adb -s <device name> shell. Now you are in your device's shell. On a side note, if you want to access the shell of an emulator with root access installed from Android Studio, issue adb -s <device name> root before accessing the shell."} +{"doc_id": 3089, "author": "Philip Regan", "text": "Tethering isn't allowed for the iPhone or iPad (at least not without jailbreaking it, I believe). At the same time, I have a wifi-only iPad, and I have yet to find myself in a situation where I was 1.) in a public wifi-less area and 2.) I just had to have a book for the Kindle app at that very moment. The chances are more off than on, realistically, and a little planning obviates the scenario you describe. I feel the 3G is a bit of a boondoggle, at least for me."} +{"doc_id": 101395, "author": "Senseful", "text": "(None of the other answers explain how to type a superscript besides copy/pasting.) To type a superscript character in an application that doesn't support superscript, you can use Character Viewer: Edit > Emoji & Symbols \u2303\u2318Space You'll find these under the Symbols > \u2460 Digits list Here they are for copy pasting: Superscript: \u2070\u00b9\u00b2\u00b3\u2074\u2075\u2076\u2077\u2078\u2079 Subscript: \u2080\u2081\u2082\u2083\u2084\u2085\u2086\u2087\u2088\u2089"} +{"doc_id": 363557, "author": "Feriman", "text": "I did some performance tests with wget and curl, and the result is: average run time over 100 tests while downloading a 1 MB file: wget: 0.844s cURL: 0.680s average run time over 100 tests while downloading a 5 MB file: wget: 1.075s cURL: 0.863s average run time over 100 tests while downloading a 10 MB file: wget: 1.182s cURL: 1.074s Command size on the system: wget: 371K cURL: 182K"} +{"doc_id": 232486, "author": "Dan", "text": "With csh or any shell implementing csh-like history substitution (tcsh, bash, zsh), you can also use !<string> to call the last command beginning with <string>. 
for example if you ran $ tail file.txt $ less otherfile.txt $ !ta !ta would execute tail file.txt"} +{"doc_id": 429110, "author": "David Schwartz", "text": "GET and POST have a clear content-neutral rationale. GET is to retrieve the content of an URL in a way that is safe to repeat and possibly cache. POST is to do something in a way that is not safe to repeat, execute speculatively, or cache. There was no similar rationale for PUT or DELETE. They are both completely covered by POST. Creating or destroying a resource are operations that are not safe to repeat, not safe to execute speculatively, and should not be cached. There are no additional special semantics needed for them. So basically there is no benefit."} +{"doc_id": 134199, "author": "kzh", "text": "Apparently nobody. The developer gets $0. http://shiftyjelly.wordpress.com/2011/08/02/amazon-app-store-rotten-to-the-core/ via http://www.reddit.com/r/programming/comments/j6neg/amazon_app_store_rotten_to_the_core/"} +{"doc_id": 232502, "author": "Jared Chmielecki", "text": "I find I need to redo commands as super user often, so sudo !! redoes the previous command as if I had remembered to type sudo in the first place."} +{"doc_id": 65751, "author": "C\u0153ur", "text": "This answer is aimed at latest compatibility for OS X 10.11 El Capitan. Install latest osxfuse (3.x.x) from https://github.com/osxfuse/osxfuse/releases or from Homebrew with brew cask install osxfuse. Install latest NTFS-3G (2015.3.14) from brew (homebrew/fuse/ntfs-3g). As follow: brew install ntfs-3g Link NTFS-3G to boot after temporary disabling System Integrity Protection. As follow: [reboot by holding CMD+R to get in recovery mode] csrutil disable [reboot normally] sudo mv /sbin/mount_ntfs /sbin/mount_ntfs.original sudo ln -s /usr/local/sbin/mount_ntfs /sbin/mount_ntfs [reboot by holding CMD+R to get in recovery mode] csrutil enable [reboot normally] installation alternative For building NTFS-3G from source edge (2015.9.30): check this answer"} +{"doc_id": 232508, "author": "Gilles 'SO- stop being evil'", "text": "Most shells that have a command line editing feature support Emacs key bindings. (a tiny subset) Up Ctrl+P Down Ctrl+N Left Ctrl+B Right Ctrl+F Home Ctrl+A End Ctrl+E Delete Ctrl+D Alternatively, you could set up your shell to use vi command editing mode, by adding set -o vi to your shell startup file (e.g., ~/.bashrc). Then, for example, you can Use EsckEnter to re-execute the previous command (same as !!). The minus key - also works as a move up command. Use EsckkkkkkkkkkkkEnter or Esc12kEnter to re-execute the 12th previous command (same as !-12). Use Esc and a motion command (i.e., k, suitably repeated), and then edit the bash command line you find there. Remember, you will still be in vi command mode, so you will need to use a vi editing command (e.g., I, A, C, R, or one of their lowercase counterparts) to actually add to the command from history. So, for example, EsckisudoSpaceEnter is equivalent to sudo !!. For advanced users: you can even copy (yank) text from one line and paste (put) it on another, so you can accomplish results comparable to !-2:- !$. (Unfortunately, it does not seem to support named buffers.)"} +{"doc_id": 166976, "author": "Robert", "text": "The correct way to verify an APK file is to use apksigner. apksigner is part of the Android build tools, therefore you may find multiple versions installed, one for each build-tools version installed. 
One example path within the Android SDK to apksigner is: android-sdk/build-tools/29.0.2/apksigner Execute apksigner this way: apksigner verify --verbose --print-certs Signal-website-universal-release-4.49.13.apk Verifies Verified using v1 scheme (JAR signing): true Verified using v2 scheme (APK Signature Scheme v2): true Verified using v3 scheme (APK Signature Scheme v3): true Number of signers: 1 Signer #1 certificate DN: CN=Whisper Systems, OU=Research and Development, O=Whisper Systems, L=Pittsburgh, ST=PA, C=US Signer #1 certificate SHA-256 digest: 29f34e5f27f211b424bc5bf9d67162c0eafba2da35af35c16416fc446276ba26 Signer #1 certificate SHA-1 digest: 45989dc9ad8728c2aa9a82fa55503e34a8879374 Signer #1 certificate MD5 digest: d90db364e32fa3a7bda4c290fb65e310 Signer #1 key algorithm: RSA Signer #1 key size (bits): 1024 Signer #1 public key SHA-256 digest: 75336a3cc9edb64202cd77cd4caa6396a9b5fc3c78c58660313c7098ea248a55 Signer #1 public key SHA-1 digest: b46cbed18d6fbbe42045fdb93f5032c943d80266 Signer #1 public key MD5 digest: 0f9c33bbd45db0218c86ac378067538d Now you have verified the APK, but you still dont know if you can trust the person/organization who has signed the APK file. This is because on Android APK signatures use by definition self-signed certificates. If you can trust a certificate is therefore a difficult question. The only way is to check the other apps that have been signed using the same certificate. The only way I know to do so is to use online PlayStore crawling services like androidobservatory.org. It has an API for checking which apps have been signed by the same certificate using the certificate SHA-1 digest: SHA-1: https://androidobservatory.org/cert/45989DC9AD8728C2AA9A82FA55503E34A8879374 Edit: apkmirror.com also allows to search for the certificate digest. Just enter the plain SHA-1 or SHA-256 certificate digest (without colons or spaces) in the search field: SHA-1: https://www.apkmirror.com/?post_type=app_release&searchtype=app&s=45989DC9AD8728C2AA9A82FA55503E34A8879374 SHA-256: https://www.apkmirror.com/?post_type=app_release&searchtype=apk&s=29f34e5f27f211b424bc5bf9d67162c0eafba2da35af35c16416fc446276ba26 On this page you can see all the other APK files on Google Play Store that are signed with the same certificate."} +{"doc_id": 3138, "author": "Fake Name", "text": "PresButan (Mac OS X 10.4.x-10.7.x): An idiotically named solution to a spectacular UI oversight (rather apropos, eh?). It also lets you use either the backspace or delete key to delete files. I can confirm it works (just installed it). Apparently it leverages the accessibility system and uses a daemon to catch the return events. If you are on 10.3-10.5, you also have the option of using ReturnOpen."} +{"doc_id": 429126, "author": "maxpolun", "text": "My understanding is that browsers dont know what to do once they send a PUT or a DELETE. A POST will redirect to an appropriate page usually, but PUT and DELETE typically dont. This makes them appropriate for calling via ajax or a native program, but not from a web browser form. I cant hind it right now, but I remember reading one of the html5 mailing lists when they were discussing this."} +{"doc_id": 265288, "author": "travnik", "text": "This is probably what you want my_command > output.log 2>&1 & this will start your command, redirecting both stdout and stderr to some output.log which you can specify. If you dont care to store the output at all - you can use /dev/null instead of an actual file. 
& will execute command in the background so that you can continue inputting commands while it is running. 2>&1 redirects stderr to stdout so that all output is caught. also, when you run a command like this, you should get a confirmation from the kernel similar to this: [2] 1234 This means that your process is running in the background and its id is 1234, so you can kill it later if you wish with kill -9 1234"} +{"doc_id": 35921, "author": "user48089", "text": "You could try SecureCRT and SecureFX from VanDyke Software."} +{"doc_id": 35923, "author": "iconoclast", "text": "The best GUI application for SSH (and everything else you can do on the command line) is iTerm 2. While the original iTerm had a tabbed interface before Terminal did, iTerm 2 again eclipses Terminal by adding: Support for 256 colors (youll never go back to 16 colors after using 256) Split panes (the sort of thing you can do in GNU screen or tmux, but at the level of the terminal emulator rather than in a program running on the server) Special provision for integrating with tmux (an alternative to GNU screen, and which most people regard as better & faster than screen) Terminal-level auto-completion (I dont use this feature so I cant detail how it has advantages over shell-level autocompletion: especially if you use the fish shell or zsh, then it may not be better) Growl support an Expos\u00e9-like view of your tabs a full-screen view (and you can choose from either its own or OS Xs built-in full-screen mode; I greatly prefer iTerms own full-screen mode, since it doesnt force you to move to a new Space, thus allowing Command-Tab to still work properly) paste history (a good complement to the shells command histories) Search Instant Replay and a lot more. Some are mentioned here but some are not, such as co-processes, triggers,smart selection, semantic history, and so on. Development is pretty active, but documentation seems to lag behind. I highly recommend it. Ive been using it for years now and have never missed Terminal. (Its possible Terminal does some of the things I mention here--its been so long since Ive used it that I dont recall, but when I switched I paid close attention to the differences and there were lots of advantages to iTerm. And it keeps getting better every few weeks or months.)"} +{"doc_id": 592991, "author": "user3260912", "text": "Even the most modern versions of HTTPS using TLS can easily be intercepted by a MitM (e.g. a Juniper device configured for the purpose) if the client trusts the CA. In that particular case, its not secure."} +{"doc_id": 396387, "author": "John Kraft", "text": "OOP requires the ability to think abstractly; a gift/curse that few people, even professional programmers, really have."} +{"doc_id": 396388, "author": "Tim Claason", "text": "I actually have a blog called Struggles in Object Oriented Programming, that was born out of some of my struggles with learning it. I think it was particularly difficult for me to understand because I spent so much time using procedural programming, and I had a tough time getting my head around the idea that an object could be represented by a collection of attributes and behaviors (I was used to simply a collection of variables and methods). Also, theres a lot of concepts that make a language object oriented - inheritance, interfaces, polymorphism, composition, etc. 
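The `my_command > output.log 2>&1 &` recipe above maps naturally onto `subprocess`. A small sketch: `my_command` is the answer's placeholder, stood in for here by `sleep 60` so the example runs anywhere with a POSIX `sleep`, and the printed PID plays the role of the shell's `[2] 1234` notice.

```python
import signal
import subprocess

# Rough analogue of `my_command > output.log 2>&1 &` from the answer above.
with open("output.log", "ab") as log:
    proc = subprocess.Popen(["sleep", "60"], stdout=log, stderr=subprocess.STDOUT)

print(f"[{proc.pid}] running in the background")  # shell prints e.g. [2] 1234
proc.send_signal(signal.SIGTERM)                  # gentler than the answer's kill -9
proc.wait()
```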
There really is a lot to learn about the theory of it before you can actually write code effectively, and in an object-oriented way, whereas with procedural programming, its simply a matter of understanding things like memory allocation for variables, and entry point calls to other methods."} +{"doc_id": 396390, "author": "DBlackborough", "text": "I dont think it is difficult to understand but it may be that a lot of the programmers querying are new to the concept, coming from procedural languages. From what I have seen/read lots of people (in forums at least) look for a result from OOP. If you are a procedural programmer who doesnt go back and modify extend their code it can probably be hard to understand the benefits. Also, there is a lot of bad OOP out there, if people are reading/seeing that then it is easy to see why they might find it difficult. IMO you need to wait until it clicks or be taught by someone with real knowledge, I dont think you can rush."} +{"doc_id": 396391, "author": "CodexArcanum", "text": "Any paradigm requires a certain push over the edge to grasp, for most people. By definition, its a new mode of thought and so it requires a certain amount of letting go of old notions and a certain amount of fully grasping why the new notions are useful. I think a lot of the problem is that the methods used to teach computer programming are pretty poor in general. OOP is so common now that its not as noticeable, but you still see it often in functional programming: important concepts are hidden behind odd names (FP: Whats a monad? OOP: Why do they call them functions sometimes and methods other times?) odd concepts are explained in metaphor instead of in terms of what they actually do, or why youd use them, or why anyone ever thought to use them (FP: A monad is a spacesuit, it wraps up some code. OOP: An object is like a duck, it can make noise, walk and inherits from Animal) the good stuff varies from person to person, so its not quite clear what will be the tipping point for any student, and often the teacher cant even remember. (FP: Oh, monads let you hide something in the type itself and carry it on without having to explicitly write out whats happening each time. OOP: Oh, objects let you keep the functions for a kind of data with that data.) The worst of it is that, as the question indicates, some people will immediately snap to understanding why the concept is good, and some wont. It really depends on what the tipping point is. For me, grasping that objects store data and methods for that data was the key, after that everything else just fit as a natural extension. Then I had later jumps like realizing that a method call from an object is very similar to making a static call with that that object as the first parameter. The little jumps later on help refine understanding, but its the initial one that takes a person from OOP doesnt make sense, why do people do this? to OOP is the best, why do people do anything else?"} +{"doc_id": 560232, "author": "R.. GitHub STOP HELPING ICE", "text": "No matter which CA you go with, your users assurance that theyre actually communicating with your site and not an attacker is only as good as the worst CA their browser trusts - an attacker who wants to forge a certificate can shop for a CA with bad practices. So I dont see any plausible argument that your choice of CA impacts your sites security, unless you choose a CA that generates private keys for you rather than signing a key you provide, or that disallows large key sizes. 
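The realization quoted above, that a method call is very close to a static call with the object as the first parameter, is literally observable in Python: a bound method is just the class-level function with the instance pre-filled. The names below are invented for the demo.

```python
class Greeter:
    def __init__(self, name):
        self.name = name

    def greet(self, punctuation):
        return f"Hello, {self.name}{punctuation}"

g = Greeter("world")
# These two calls are exactly equivalent: the instance becomes the
# first argument of the class-level function.
assert g.greet("!") == Greeter.greet(g, "!")
print(g.greet("!"))  # Hello, world!
```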
Other than that, as others have said, its probably a good idea to avoid CAs whose mix of bad practices and small size makes it plausible that their trust might be revoked by one or more browsers, since this would impact the accessibility (and public perception) of your site."} +{"doc_id": 396393, "author": "Ken Bloom", "text": "You should read Objects Never? Well, Hardly Ever. (ACM membership required) by Mordechai Ben-Ari who suggests that OOP is so difficult, because its not a paradigm thats actually natural for modeling anything. (Though I have reservations about the article, because its not clear what criteria he feels a program needs to satisfy to say that its written on the OOP paradigm as opposed to a procedural paradigm using an OO language.)"} +{"doc_id": 3178, "author": "Mike Scott", "text": "I dont think you can set this for individual folders. To set it globally, so that Finder always shows hidden files, run Terminal and enter the following two commands: defaults write com.apple.finder AppleShowAllFiles true killall Finder To switch back, do the same but substitute false for true. This works all the way through macOS Catalina (and betas for Big Sur)."} +{"doc_id": 396394, "author": "dsimcha", "text": "I personally found the mechanics of OOP fairly easy to grasp. The hard part for me was the why of it. When I was first exposed to it, it seemed like a solution in search of a problem. Here are a few reasons why I think most people find it hard: IMHO teaching OO from the beginning is a terrible idea. Procedural coding is not a bad habit and is the right tool for some jobs. Individual methods in an OO program tend to be pretty procedural looking anyhow. Furthermore, before learning procedural programming well enough for its limitations to become visible, OO doesnt seem very useful to the student. Before you can really grasp OO, you need to know the basics of data structures and late binding/higher order functions. Its hard to grok polymorphism (which is basically passing around a pointer to data and a bunch of functions that operate on the data) if you dont even understand the concepts of structuring data instead of just using primitives and passing around higher order functions/pointers to functions. Design patterns should be taught as something fundamental to OO, not something more advanced. Design patterns help you to see the forest through the trees and give relatively concrete examples of where OO can simplify real problems, and youre going to want to learn them eventually anyhow. Furthermore, once you really get OO, most design patterns become obvious in hindsight."} +{"doc_id": 3180, "author": "Am1rr3zA", "text": "I myself use hiddenfiles widget. its so easy to use. Beside that you can use free 3rd-party software like Deeper or TinkerTool."} +{"doc_id": 396399, "author": "JeffO", "text": "I think many programmers have difficulty with upfront design and planning to begin with. Even if someone does all the design for you, it is still possible to break away from OOP principles. If I take a bunch of spaghetti code and dump it into a class, is that really OOP? Someone who doesnt understand OOP can still program in Java. Also, dont confuse difficulty to understand with not willing to follow a certain methodolgy or disagreeing with it."} +{"doc_id": 396403, "author": "Dan Monego", "text": "Because the basic explanation of OOP has very, very little to do with how its used in the field. 
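The gloss of polymorphism above, passing around data together with the functions that operate on it, can be spelled both ways in Python. The dict-based version is the hand-rolled form; the class version shows what dynamic dispatch automates. Shape names and the crude pi constant are illustrative only.

```python
# Hand-rolled polymorphism: each record carries its data and the function
# that knows how to operate on that data.
def circle_area(shape):
    return 3.14159 * shape["r"] ** 2

def square_area(shape):
    return shape["side"] ** 2

shapes = [{"r": 2.0, "area": circle_area}, {"side": 3.0, "area": square_area}]
for s in shapes:
    print(s["area"](s))  # late binding: the data decides which function runs

# The same idea with classes: the method lookup happens for us.
class Circle:
    def __init__(self, r):
        self.r = r
    def area(self):
        return 3.14159 * self.r ** 2

class Square:
    def __init__(self, side):
        self.side = side
    def area(self):
        return self.side ** 2

for shape in [Circle(2.0), Square(3.0)]:
    print(shape.area())
```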
Most programs for teaching it try to use a physical model, such as Think of a car as an object, and wheels as objects, and the doors, and the transmission ..., but outside of some obscure cases of simulation programming, objects are much more often used to represent non-physical concepts or to introduce indirection. The effect is that it makes people understand it intuitively in the wrong way. Teaching from design patterns is a much better way to describe OOP, as it shows programmers how some actual modeling problems can be effectively attacked with objects, rather than describing it in the abstract."} +{"doc_id": 396405, "author": "Jerry Coffin", "text": "I think there are a few of factors that havent been mentioned yet. First of all, at least in pure OOP (e.g., Smalltalk) where everything is an object, you have to twist your mind into a rather unnatural configuration to think of a number (for only one example) as an intelligent object instead of just a value -- since in reality, 21 (for example) really is just a value. This becomes especially problematic when on one hand youre told that a big advantage of OOP is modeling reality more closely, but you start off by taking what looks an awful lot like an LSD-inspired view of even the most basic and obvious parts of reality. Second, inheritance in OOP doesnt follow most peoples mental models very closely either. For most people, classifying things most specifically does not have anywhere close to the absolute rules necessary to create a class hierarchy that works. In particular, creating a class D that inherits from another class B means that objects of class D share absolutely, positively all the characteristics of class B. class D can add new and different characteristics of its own, but all the characteristics of class B must remain intact. By contrast, when people classify things mentally, they typically follow a much looser model. For one example, if a person makes some rules about what constitutes a class of objects, its pretty typical that almost any one rule can be broken as long as enough other rules are followed. Even the few rules that cant really be broken can almost always be stretched a little bit anyway. Just for example, consider car as a class. Its pretty easy to see that the vast majority of what most people think of as cars have four wheels. Most people, however, have seen (at least a picture of) a car with only three wheels. A few of us of the right age also remember a race car or two from the early 80s (or so) that had six wheels -- and so on. This leaves us with basically three choices: Dont assert anything about how many wheels a car has -- but this tends to lead to the implicit assumption that itll always be 4, and code thats likely to break for another number. Assert that all cars have four wheels, and just classify those others as not cars even though we know they really are. Design the class to allow variation in the number of wheels, just in case, even though theres a good chance this capability will never be needed, used, or properly tested. Teaching about OOP often focuses on building huge taxonomies -- e.g., bits and pieces of what would be a giant hierarchy of all known life on earth, or something on that order. This raises two problems: first and foremost, it tends to lead many people toward focusing on huge amounts of information thats utterly irrelevant to the question at hand. 
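Both points above translate directly into code: a derived class keeps absolutely all of its base's behavior, and the car-wheels dilemma is usually resolved as the third option, a sensible default that still allows variation. The class names B, D, and Car come from the text; everything else is invented for the sketch.

```python
class B:
    def describe(self) -> str:
        return "a B"

class D(B):
    # D keeps every characteristic of B intact and may only add to them.
    def extra(self) -> str:
        return "something only D does"

d = D()
assert isinstance(d, B) and d.describe() == "a B"

# The car example, resolved as option 3: default to four wheels, allow variation.
class Car:
    def __init__(self, wheels: int = 4):
        self.wheels = wheels

print(Car().wheels)          # 4: the common case stays effortless
print(Car(wheels=6).wheels)  # 6: the six-wheeled 80s racer still fits
```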
At one point I saw a rather lengthy discussion of how to model breeds of dogs, and whether (for example) miniature poodle should inherit from full sized poodle, or vice versa, or whether there should be an abstract base Poodle class, with full-size poodle and miniature poodle both inheriting from it. What they all seemed to ignore was that the application was supposed to deal with keeping track of licenses for dogs, and for the purpose at hand it was entirely adequate to have a single field named breed (or something on that order) with no modeling of the relationship between breeds at all. Second, and almost importantly, it leads to focusing on the characteristics of the items, instead of focusing on the characteristics that are important for the task at hand. It leads toward modeling things as they are, where (most of the time) whats really needed is building the simplest model that will fill our needs, and using abstraction to fit the necessary sub-classes to fit the abstraction weve built. Finally, Ill say once again: were slowly following the same path taken by databases over the years. Early databases followed the hierarchical model. Other than focusing exclusively on data, this is single inheritance. For a short time, a few databases followed the network model -- essentially identical to multiple inheritance (and viewed from this angle, multiple interfaces arent enough different from multiple base classes to notice or care about). Long ago, however, databases largely converged on the relational model (and even though they arent SQL, at this level of abstraction the current NoSQL databases are relational too). The advantages of the relational model are sufficiently well known that I wont bother repeating them here. Ill just note that the closest analog of the relational model we have in programing is generic programming (and sorry, but despite the name, Java generics, for one example, dont really qualify, though they are a tiny step in the right direction)."} +{"doc_id": 68726, "author": "phoenixtu", "text": "Your phone may not be on Do Not Disturb, but that conversation is - its a separate setting from your phones Do Not Disturb setting. Just go into that conversation -> Details -> toggle the Do Not Disturb switch and you should have your notifications back."} +{"doc_id": 396408, "author": "ElGringoGrande", "text": "I think the reason OOP is difficult for many is because the tools dont really facilitate it. Computer languages today are an abstraction of what is going on in the computer. OOP is an abstracted way to represent abstractions. So we are using an abstraction to build abstractions with an abstraction. Add to this that what we are abstracting are usually very complex physical/social interactions and, well, no wonder."} +{"doc_id": 396409, "author": "John Fisher", "text": "I think you can summarize the basic difficulty this way: // The way most people think. Operation - object - parameters // Example: Turn the car left. // The way OOP works conceptually Object - operation - parameters // Example: Car.Turn(270); Sure, people can get used to the mapping of left as 270, and yeah, saying Car.Turn instead of turn the car isnt such a huge leap. BUT, to deal well with these objects and to create them, you have to invert the way you normally think. Instead of manipulating an object, were telling the object to actually do things on its own. It may not feel difficult any more, but telling a window to open itself sounds odd. 
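The inversion described above, "turn the car" versus `Car.Turn(270)`, looks like this in Python. The `Car`/`turn` names come from the quoted example; the heading attribute is invented to make the sketch runnable.

```python
# Procedural framing: operation first, the object is an argument.
def turn(car: dict, heading: int) -> None:
    car["heading"] = heading

# OO framing: the object is told to act on itself, the Car.Turn(270) of the text.
class Car:
    def __init__(self) -> None:
        self.heading = 0

    def turn(self, heading: int) -> None:
        self.heading = heading

my_car = Car()
my_car.turn(270)       # "turn the car left" becomes car.turn(270)
print(my_car.heading)  # 270
```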
People unused to this way of thinking have to struggle with that oddness over and over until finally it somehow becomes natural."}
+{"doc_id": 494715, "author": "Stephen", "text": "There are a few reasons. Nobody reads documentation. Nobody follows the documentation even if they do read it. Nobody updates the documentation even if they do read it and follow it. Writing a list of practices is much less effective than creating a culture. Coding standards are not about what people should do, but are about what they actually do. When people deviate from the standards, this should be picked up and changed through a code review process and/or automated tools. Remember, the whole point of coding standards is to make our lives easier. They're a shortcut for our brain so that we can filter out the unnecessary stuff from the important stuff. It's much better to create a culture of review to enforce this than it is to formalise it in a document."}
+{"doc_id": 396413, "author": null, "text": "Object Oriented Programming in itself is not hard. The hard part comes in doing it well. Where to put the cut between code so you can easily move things to the common base object, and extend them later? How to make your code usable by others (extend classes, wrap in proxies, override methods) without jumping through hoops to do so. That is the hard part, and if done right can be very elegant, and if done badly can be very clumsy. My personal experience is that it requires a lot of practice to have been in all the situations where you would WISH that you did it differently, in order to do it well enough this time."}
+{"doc_id": 199805, "author": "Gerald Senarclens de Grancy", "text": "Another option would be to enable colors and use less -r as your pager. git config --global color.ui true git config --global core.pager 'less -r' This results in [color] ui = true [core] pager = less -r in your ~/.gitconfig For more information see the Pro Git book. Possible values for color.ui can be found in the man page of git-config. The output of man git-config | grep 'color.ui$' -A8 is color.ui This variable determines the default value for variables such as color.diff and color.grep that control the use of color per command family. Its scope will expand as more commands learn configuration to set a default for the --color option. Set it to false or never if you prefer Git commands not to use color unless enabled explicitly with some other configuration or the --color option. Set it to always if you want all output not intended for machine consumption to use color, to true or auto (this is the default since Git 1.8.4) if you want such output to use color when written to the terminal."}
+{"doc_id": 167042, "author": "Irfan Latif", "text": "Some major changes occurred to storage in Android 4.4 (see Android's Storage Journey). So the following is generally true for Android 4.4+ and particularly 6+. This is from my detailed answer to \"How disk space is used on Android device?\". Apps' files are saved (by system and app itself) to internal and external storage under different categories.
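To verify that the two `git config` settings above took effect, one option is to ask git itself rather than parsing `~/.gitconfig` by hand. A small sketch, assuming `git` is on the PATH (the helper name is invented):

```python
import subprocess

def git_config(key: str) -> str:
    # `git config --global --get <key>` prints the configured value, if any.
    out = subprocess.run(["git", "config", "--global", "--get", key],
                         capture_output=True, text=True)
    return out.stdout.strip() or "unset"

print(git_config("color.ui"))    # expect: true
print(git_config("core.pager"))  # expect: less -r
```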
DIRECTORY DESCRIPTION / API ===================================================================================== APP CODE ======== /data/app/* (user apps' installation directory) /data/app/*/base.apk (original `.apk` file) /data/app/*/lib/<arch>/*.so (shared libraries) /data/app/*/oat/<arch>/base.[art|odex|vdex] (compiled executable code) /data/dalvik-cache/<arch>/*.[art|dex|oat|vdex] (compiled executable code, only for system apps) /data/misc/profiles/cur/<user_id>/<package>/primary.prof (ART profile) /data/misc/profiles/ref/<package>/primary.prof (ART profile) INTERNAL STORAGE ================ /data/user[_de]/<user_id>/<package> getDataDir /data/user[_de]/<user_id>/<package>/files getFilesDir /data/user[_de]/<user_id>/<package>/[code_]cache getCacheDir or getCodeCacheDir /data/user[_de]/<user_id>/<package>/databases getDatabasePath /data/user[_de]/<user_id>/<package>/no_backup getNoBackupFilesDir /data/user[_de]/<user_id>/<package>/shared_prefs getSharedPreferences EXTERNAL STORAGE ================ /storage/emulated/obb/<package>/*.obb (shared by multi-users, exposed in following view) /storage/emulated/<user_id>/Android/obb/<package>/*.<package>.obb getObbDirs /storage/emulated/<user_id>/Android/media/<package> getExternalMediaDirs /storage/emulated/<user_id>/Android/data/<package>/ /storage/emulated/<user_id>/Android/data/<package>/files getExternalFilesDirs /storage/emulated/<user_id>/Android/data/<package>/[code_]cache getExternalCacheDirs All of the above paths on internal and external storage (primary and secondary) are apps' private directories which are accessible to the respective app without requesting any permission. Apps can also create other directories (not explicitly available through APIs) in their private storage. All of these directories belonging to an app are deleted when the app is uninstalled. Additionally apps can put their data anywhere on primary external storage (including some standard directories and other apps' private directories) if WRITE_EXTERNAL_STORAGE permission is granted (getExternalStorageDirectory returns /storage/emulated/<user_id>). For secondary external storage and removable storage SAF is used. See details in \"How to save files to external SD card?\". However in Android 10 writing directly to primary external shared storage is deprecated (getExternalStorageDirectory and getExternalStoragePublicDirectory are no longer available). Apps need to use one of Android's built-in content providers; either MediaStore (for media files) or SAF (for any other type of files). /data paths may get replaced with /mnt/expand/[UUID] when using Adoptable Storage. /storage/emulated gets replaced with /storage/[UUID] when using secondary external storage (like an SD card). For multiple users/profiles <user_id> is different; device owner is always 0. /data/user/0 is a symlink to /data/data for historical reasons. Secondary external storage is only available to device owner. OBB directory is shared among users/profiles (up to Android 9) to save space. FUSE/sdcardfs always exposes /storage/emulated/obb as /storage/emulated/<user_id>/Android/obb. /data/user_de is the Device Encrypted storage on FBE devices which lets certain apps run on boot without asking for user credentials. /data/misc/profiles are used by ART for profile-guided compilation of app code. Description of each directory is somewhat evident from names, details can be seen in API documentation. Caches are cleared by OS when running low on storage, keeping apps exceeding the allotted quota on top. Apps' private files directories in external storage aren't automatically scanned by MediaScanner but media directories are. Caches and no_backup directories are not backed up to cloud.
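The directory table above pairs each private path with the API that returns it; purely as an illustration, that mapping can be summarized as a lookup of path templates (the dictionary and placeholder names here are invented, not an Android API):

```python
# Path templates keyed by the Android API that returns them; <user_id> and
# <package> stand in for the per-user and per-app components.
PRIVATE_DIRS = {
    "getFilesDir":          "/data/user/{user_id}/{package}/files",
    "getCacheDir":          "/data/user/{user_id}/{package}/cache",
    "getDatabasePath":      "/data/user/{user_id}/{package}/databases",
    "getExternalFilesDirs": "/storage/emulated/{user_id}/Android/data/{package}/files",
    "getExternalCacheDirs": "/storage/emulated/{user_id}/Android/data/{package}/cache",
}

print(PRIVATE_DIRS["getFilesDir"].format(user_id=0, package="com.example.app"))
```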
See official documentation."}
+{"doc_id": 199816, "author": "St\u00e9phane Chazelas", "text": "With GNU or FreeBSD find, you can use the -quit predicate: find . ... -print -quit The NetBSD find equivalent: find . ... -print -exit If all you do is printing the name, and assuming the filenames don't contain newline characters, you could do: find . ... -print | head -n 1 That will not stop find after the first match, but possibly, depending on timing and buffering, upon the second match or (much) later. Basically, find will be terminated with a SIGPIPE when it tries to output something while head is already gone because it has already read and displayed the first line of input. Note that not all shells will wait for that find command after head has returned. The Bourne shell and AT&T implementations of ksh (when non-interactive) and yash (only if that pipeline is the last command in a script) would not, leaving it running in background. If you'd rather see that behaviour in any shell, you could always change the above to: (find . ... -print &) | head -n 1 If you're doing more than printing the paths of the found files, you could try this approach: find . ... -exec sh -c 'printf \"%s\\n\" \"$1\"; kill -s PIPE \"$PPID\"' sh {} \\; (replace printf with whatever you would be doing with that file). That has the side effect of find returning an exit status reflecting the fact that it was killed though. We're sending the SIGPIPE signal instead of the default SIGTERM to avoid the message that some shells display when parts of a pipe line are killed with a signal. They generally don't do it for deaths by SIGPIPE, as those are naturally happening (like in find | head above...)."}
+{"doc_id": 134285, "author": "Shoan", "text": "If you have Tasker installed, then: 1. Create an application context for the camera app. 2. Create a task to set system volume down to zero. When you exit the camera app, the system volume is restored to its original state."}
+{"doc_id": 494748, "author": "Jeffrey Sweeney", "text": "The point that global variables can't be trusted because the state can be changed somewhere else is, in itself, not reason enough to not use them, agreed (it's a pretty good reason though!). It's likely the answer was mainly describing usage where restricting a variable's access to only the areas of code that it's concerned with would make more sense. Databases are a different matter, however, because they're designed for the purpose of being accessed \"globally\", so to speak. For example: Databases typically have built-in type and structure validation that goes further than the language accessing them Databases almost unanimously update based off transactions, which prevents inconsistent states, whereas there are no guarantees what the end state will look like with a global object (unless it's hidden behind a singleton) Database structure is at least implicitly documented based off table or object structure, more so than the application utilizing it Most importantly though, databases serve a different purpose than a global variable. Databases are for storing and searching large quantities of organized data, where global variables serve specific niches (when justifiable)."}
+{"doc_id": 494749, "author": "Jules", "text": "First, I'd say that the answer you link to overstates that particular issue and that the primary evil of global state is that it introduces coupling in unpredictable ways that can make it difficult to change the behaviour of your system in future.
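The `find ... -print -quit` idiom above, stopping a directory walk at the first match instead of scanning the whole tree, has a rough Python analogue (a sketch, assuming a `*.jsonl` pattern just for the example):

```python
from pathlib import Path

# rglob() yields matches lazily; next() stops the walk at the first one,
# much like find's -quit, instead of enumerating everything and slicing.
first = next(Path(".").rglob("*.jsonl"), None)
print(first)  # None if nothing under the current directory matches
```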
But delving into this issue further, there are differences between global state in a typical object-oriented application and the state that is held in a database. Briefly, the most important of these are: Object-oriented systems allow replacing an object with a different class of object, as long as it is a subtype of the original type. This allows behaviour to be changed, not just data. Global state in an application does not typically provide the strong consistency guarantees that a database does -- there are no transactions during which you see a consistent state for it, no atomic updates, etc. Additionally, we can see database state as a necessary evil; it is impossible to eliminate it from our systems. Global state, however, is unnecessary. We can entirely eliminate it. So even were the issues with a database just as bad, we can still eliminate some of the potential problems, and a partial solution is better than no solution."}
+{"doc_id": 494750, "author": null, "text": "First, what are the problems with global variables, based on the accepted answer to the question you linked? Very briefly, it makes program state unpredictable. Databases are, the vast majority of the time, ACID compliant. ACID specifically addresses the underlying issues that would make a data store unpredictable or unreliable. Further, global state hurts the readability of your code. This is because global variables exist in a scope far away from their usage, maybe even in a different file. When using a database, you are using a record set or ORM object that is local to the code you are reading (or should be). Database drivers typically provide a consistent, understandable interface to access data that is the same regardless of problem domain. When you get data from a database, your program has a copy of the data. Updates are atomic. Contrast to global variables, where multiple threads or methods may be operating on the same piece of data with no atomicity unless you add synchronization yourself. Updates to the data are unpredictable and difficult to track down. Updates may be interleaved, causing bog-standard textbook examples of multithreaded data corruption (e.g. interleaved increments). Databases typically model different data than global variables to begin with, but leaving that aside for a moment, databases are designed from the ground up to be an ACID-compliant data store that mitigates many of the concerns with global variables."}
+{"doc_id": 494755, "author": "svidgen", "text": "I'd offer a few observations: Yes, a database is global state. In fact, it's a super-global state, as you pointed out. It's universal! Its scope entails anything or anyone that connects to the database. And, I suspect lots of folks with years of experience can tell you horror stories about how strange things in the data led to unexpected behavior in one or more of the relevant applications... One of the potential consequences of using a global variable is that two distinct modules will use that variable for their own distinct purposes. And to that extent, a database table is no different. It can fall victim to the same problem. Hmm ... Here's the thing: If a module doesn't operate extrinsically in some way, it does nothing. A useful module can be given data or it can find it. And, it can return data or it can modify state. But, if it doesn't interact with the external world in some way, it may as well do nothing. Now, our preference is to receive data and return data.
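The "updates are atomic" point above is easy to see side by side; a minimal sketch using Python's standard sqlite3 module (the table and function names are made up for illustration):

```python
import sqlite3

counter = 0  # a global: concurrent read-modify-write can silently lose updates

def bump_global():
    global counter
    counter += 1  # not atomic: load, add, store

# The database version gets atomicity from the engine, not from the caller.
db = sqlite3.connect(":memory:")
db.execute("CREATE TABLE kv (k TEXT PRIMARY KEY, v INTEGER)")
db.execute("INSERT INTO kv VALUES ('counter', 0)")

def bump_db():
    with db:  # implicit transaction: BEGIN ... COMMIT
        db.execute("UPDATE kv SET v = v + 1 WHERE k = 'counter'")

bump_global(); bump_db()
print(counter, db.execute("SELECT v FROM kv").fetchone()[0])  # 1 1
```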
Most modules are simply easier to write if they can be written with utter disregard for what the outside world is doing. But ultimately, something needs to find the data and modify that external, global state. Furthermore, in real-world applications, the data exists so that it can be read and updated by various operations. Some issues are prevented by locks and transactions. But, preventing these operations from conflicting with each other in principle, at the end of the day, simply involves careful thinking. (And making mistakes...) But also, we're generally not working directly with the global state. Unless the application lives in the data layer (in SQL or whatever), the objects our modules work with are actually copies of the shared global state. We can do whatever we want with those without any impact on the actual, shared state. And, in cases where we need to mutate that global state, under the assumption that the data we were given hasn't changed, we can generally perform the same-ish sort of locking that we would on our local globals. And finally, we usually do different things with databases than we might with naughty globals. A naughty, broken global looks like this: Int32 counter = 0; public someMethod() { for (counter = 0; counter < whatever; counter++) { // do other stuff. } } public otherMethod() { for (counter = 100; counter < whatever; counter--) { // do other stuff. } } We simply don't use databases for in-process/operational stuff like that. And it might be the slow nature of the database and the relative convenience of a simple variable that deters us: our sluggish, awkward interaction with databases simply makes them bad candidates for many of the mistakes we've historically made with variables."}
+{"doc_id": 134308, "author": "Broam", "text": "You may also use ADB to remove applications, but the application methods are easier. Since use of ADB does not require a market, this will work for users who are rooted but who cannot or do not wish to use Android Market or similar marketplaces. ./adb remount # ROOT IS REQUIRED TO REMOUNT /system read-write ./adb shell cd /system/app To see what's installed: pm list packages To remove a package: rm PackageName.apk pm uninstall class.name.of.package CyanogenMod, as far as I can tell, only requires the rm step, not the pm step; if you are running a de-odexed version of Android, you may only need the rm step. If you are concerned you might need the application in the future (over-the-air updates), you can use adb pull to copy the apk to your desktop (instead of moving it to another location on your phone) or just mv the apk to your uSD card or USB storage. See http://wiki.cyanogenmod.com/wiki/Barebones for more information."}
+{"doc_id": 494757, "author": "David Hammen", "text": "But when I look at that, I can't help but think that that's a really weak explanation, because how is that any different from working with data stored in a database? Or any different from working with an interactive device, with a file, with shared memory, etc.? A program that does exactly the same thing every time it runs is a very boring and rather useless program. So yes, it's a weak argument. To me, the difference that makes a difference with regard to global variables is that they form hidden and unprotected lines of communication. Reading from a keyboard is very obvious and protected. I have to make a certain function call, and I cannot access the keyboard driver. The same applies to file access, shared memory, and your example, databases.
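The "naughty, broken global" snippet above translates directly to Python; a sketch mirroring its shape (the function names follow the answer's, the behavior is the point):

```python
counter = 0  # module-level "naughty global" shared by unrelated functions

def some_method(limit):
    global counter
    counter = 0
    while counter < limit:
        counter += 1  # anything else touching `counter` corrupts this loop

def other_method(floor):
    global counter
    counter = 100
    while counter > floor:
        counter -= 1

some_method(5)
other_method(50)
print(counter)  # 50 -- whichever ran last wins; neither function owns the state
```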
It's obvious to the reader of the code that this function reads from the keyboard, that function accesses a file, some other function accesses shared memory (and there had better be protections around that), and yet some other function accesses a database. With global variables, on the other hand, it's not obvious at all. The API says to call foo(this_argument, that_argument). There's nothing in the calling sequence that says the global variable g_DangerWillRobinson should be set to some value before calling foo (or examined after calling foo). Google banned the use of non-const reference arguments in C++ primarily because it is not obvious to the reader of the code that foo(x) will change x because that foo takes a non-constant reference as an argument. (Compare with C#, which dictates that both the function definition and the call site must qualify a reference parameter with the ref keyword.) While I do not agree with the Google standard on this, I do understand their point. Code is written once and modified a few times, but if it's at all good, it is read many, many times. Hidden lines of communication are very bad karma. C++'s non-const references represent a minor hidden line of communication. A good API or a good IDE will show me that \"Oh! This is call by reference.\" Global variables are a huge hidden line of communication."}
+{"doc_id": 167078, "author": null, "text": "None. Just like Debian, Arch Linux and Gentoo are not based on any other distributions, Android too is not based on any existing distribution. In fact, it is not a typical GNU/Linux distribution: it is a Linux distribution, but not a GNU/Linux distribution."}
+{"doc_id": 68775, "author": "Oleksii Chekulaiev", "text": "If someone like me is having trouble enabling Session Restoration in iTerm 2.9 beta (or iTerm3 beta, as they call it), do the following: Open preferences -> General tab -> Startup -> change to \"Use System window restoration setting\". Restart. Probably for tech geeks it's obvious that one should select this option for this feature to work, but for me it was not, and I have never seen this mentioned anywhere."}
+{"doc_id": 494760, "author": "Michael Anderson", "text": "I disagree with the fundamental claim that: \"When your program is working with data from a database, you don't care if other code in your system is changing it, or even if an entirely different program is changing it, for that matter.\" My initial thought was: Wow. Just wow. So much time and effort is spent trying to avoid exactly this - and working out what trade-offs and compromises work for each application. To just ignore it is a recipe for disaster. But I also disagree on an architectural level. A global variable is not just global state. It's global state that is accessible from anywhere, transparently. In contrast, to use a database you need to get a handle to it (unless you store that handle in a global variable...). For example, using a global variable might look like this: int looks_ok_but_isnt() { return global_int++; } int somewhere_else() { ... int v = looks_ok_but_isnt(); ... } But doing the same thing with a database would have to be more explicit about what it's doing: int looks_like_its_using_a_database( MyDB * db ) { return db->get_and_increment(\"v\"); } int somewhere_else( MyDB * db ) { ... v = looks_like_its_using_a_database(db); ... } The database one is obviously mucking with a database. If you wanted to not use a database you could use explicit state, and it looks almost the same as the database case.
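David Hammen's hidden-line-of-communication point can be shown in a few lines of Python; a sketch (the names, including `g_danger_will_robinson`, follow the answer's example and are purely illustrative):

```python
g_danger_will_robinson = None  # hidden input: foo's signature never mentions it

def foo(this_argument, that_argument):
    # The caller must somehow know to set the global first -- the hidden,
    # unprotected line of communication the answer warns about.
    scale = g_danger_will_robinson or 1
    return (this_argument + that_argument) * scale

def foo_explicit(this_argument, that_argument, scale=1):
    # Same behaviour, but the dependency is visible at every call site.
    return (this_argument + that_argument) * scale

print(foo(1, 2))              # result silently depends on distant state
print(foo_explicit(1, 2, 3))  # the extra input is spelled out
```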
int looks_like_it_uses_explicit_state( MyState * state ) { return state->v++; } int somewhere_else( MyState * state ) { ... v = looks_like_it_uses_explicit_state(state); ... } So I would argue using a database is much more like using explicit state than using global variables."}
+{"doc_id": 101546, "author": "BaseZen", "text": "An attempt to clean up the accepted answer's style and logic, making it as version-independent as possible, using softwareupdate to its full capacity and introducing a re-usable os variable: #!/bin/bash # Requires root os=$(sw_vers -productVersion | awk -F. '{print $1 \".\" $2}') if softwareupdate --history | grep --silent \"Command Line Tools.*${os}\"; then echo \"Command-line tools already installed.\" else echo \"Installing Command-line tools...\" in_progress=/tmp/.com.apple.dt.CommandLineTools.installondemand.in-progress touch ${in_progress} product=$(softwareupdate --list | awk \"/\\* Command Line.*${os}/ { sub(/^ \\* /, \\\"\\\"); print }\") softwareupdate --verbose --install \"${product}\" || { echo \"Installation failed.\" 1>&2; rm ${in_progress}; exit 1; } rm ${in_progress} echo \"Installation succeeded.\" fi"}
+{"doc_id": 593070, "author": "whoami", "text": "Yes, there are some examples of malicious CSV files causing arbitrary code execution. People choose to open CSV files in MS Excel or Open Office or similar software which has macro execution capabilities. Some examples: https://www.contextis.com//resources/blog/comma-separated-vulnerabilities/ https://hackerone.com/reports/72785 If your environment does not use popular applications such as MS Excel to open CSVs, the risk is significantly reduced. I would also look for the presence of external, potentially malicious links in the downloaded CSV that might be hosting drive-by downloads (hence you would want to avoid visiting these links)."}
+{"doc_id": 134322, "author": "remmy", "text": "As far as I know, you cannot download directly from the web Market, but you can find many apps on other app sites."}
+{"doc_id": 134323, "author": "ale", "text": "You cannot download apk files from http://play.google.com/store. There are other ways to get apps (alternative markets, developer sites) and you can install them manually provided you can turn on the \"Unknown sources\" setting."}
+{"doc_id": 396466, "author": "SoftwareRockstar", "text": "I disagree with dsimcha's answer for the most part: Teaching OO from the beginning is not really a bad idea in itself, neither is teaching procedural languages. What's important is that we teach people to write clear, concise, cohesive code, regardless of OO or procedural. Individual methods in good OO programs DO NOT tend to be procedural looking at all. This is becoming more and more true with the evolution of OO languages (read C#, because other than C++ that's the only other OO language I know) and their syntax that's getting more complex by the day (lambdas, LINQ to objects, etc.). The only similarity between OO methods and procedures in procedural languages is the linear nature of each, which I doubt would change anytime soon. You can't master a procedural language without understanding data structures either. The pointer concept is as important for procedural languages as for OO languages. Passing parameters by reference, for example, which is quite common in procedural languages, requires you to understand pointers as much as it's required to learn any OO language. I don't think that design patterns should be taught early in OO programming, because they are not fundamental to OO programming.
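One common mitigation for the CSV risk described above is to neutralize cells that spreadsheet applications would interpret as formulas. A hypothetical sketch (the `defang` helper and the sample payload are invented for illustration):

```python
import csv, io

def defang(cell: str) -> str:
    # Prefix cells starting with formula triggers so Excel-like apps
    # import them as text instead of evaluating them.
    return "'" + cell if cell[:1] in ("=", "+", "-", "@") else cell

raw = io.StringIO('name,cmd\nalice,"=HYPERLINK(""http://evil"",""click"")"\n')
for row in csv.reader(raw):
    print([defang(c) for c in row])  # the formula cell comes out quoted
```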
One can definitely be a good OO programmer without knowing anything about design patterns. In fact a person can even be using well-known design patterns without knowing that they are documented as such with proper names and that books are written about them. What should be taught fundamentally is design principles such as Single Responsibility, Open/Closed, and Interface Segregation. Unfortunately, many people who consider themselves OO programmers these days are either not familiar with these fundamental concepts or just choose to ignore them, and that's why we have so much garbage OO code out there. Only after a thorough understanding of these and other principles should design patterns be introduced. To answer the original poster's question: yes, OO is a harder concept to understand than procedural programming. This is because we do not think in terms of properties and methods of real-life objects. For example, the human brain does not readily think of TurnOn as a method of TV, but sees it as a function of a human turning on the TV. Similarly, polymorphism is a foreign concept to a human brain that generally sees each real-life object by only one face. Inheritance again is not natural to our brains. Just because I am a developer does not mean that my son would be one. Generally speaking, the human brain needs to be trained to learn OO, while procedural languages are more natural to it."}
+{"doc_id": 363706, "author": "Dartmoor Tom", "text": "On Debian (Bullseye) I've found the simplest (?) free way is to use Scribus 1.5.5, which can easily import a PDF (this may be possible in earlier releases, too): Import the PDF, then make an image box where you want your signature, choose your signature file, resize as necessary and then export as a new PDF (of course, you can use a text box to place necessary text such as date etc.) It's no different ultimately to using GIMP or similar, but if you're familiar with Scribus then it's a matter of seconds to do it. I've just done it twice for signing off accounts, which is how I ended up here ;)"}
+{"doc_id": 494779, "author": "JeffO", "text": "A database can be a global state, but it doesn't have to be all the time. I disagree with the assumption that you don't have control. One way to manage that is locking and security. This can be done at the record, table or entire database. Another approach is to have some sort of version field that would prevent the changing of a record if the data are stale. Like a global variable, the value(s) in a database can be changed once they are unlocked, but there are many ways to control the access (don't give all the devs the password to the account allowed to change data). If you have a variable that has limited access, it's not very global."}
+{"doc_id": 199875, "author": "hiro", "text": "HIGHMEM is a range of the kernel's memory space, but it is NOT memory you access; it's a place where you put what you want to access. A typical 32-bit Linux virtual memory map is like: 0x00000000-0xbfffffff: user process (3GB) 0xc0000000-0xffffffff: kernel space (1GB) (CPU-specific vectors and the like are ignored here). Linux splits the 1GB kernel space into 2 pieces, LOWMEM and HIGHMEM. The split varies from installation to installation. If an installation chooses, say, 512MB-512MB for LOW and HIGH mems, the 512MB LOWMEM (0xc0000000-0xdfffffff) is statically mapped at the kernel boot time; usually the first so many bytes of the physical memory are used for this so that virtual and physical addresses in this range have a constant offset of, say, 0xc0000000.
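JeffO's "version field" guard above is the classic optimistic-concurrency pattern; a self-contained sketch using sqlite3 (the table and column names are made up for illustration):

```python
import sqlite3

db = sqlite3.connect(":memory:")
db.execute("CREATE TABLE records (id INTEGER PRIMARY KEY, value TEXT, version INTEGER)")
db.execute("INSERT INTO records VALUES (1, 'old', 1)")

def update_record(record_id, new_value, seen_version):
    # The update succeeds only if the row still carries the version the
    # writer originally read; otherwise the data is stale and we refuse.
    cur = db.execute(
        "UPDATE records SET value = ?, version = version + 1 "
        "WHERE id = ? AND version = ?", (new_value, record_id, seen_version))
    return cur.rowcount == 1

print(update_record(1, "new", 1))    # True: we held the current version
print(update_record(1, "newer", 1))  # False: stale version, caller must re-read
```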
On the other hand, the latter 512MB (HIGHMEM) has no static mapping (although you could leave pages semi-permanently mapped there, but you must do so explicitly in your driver code). Instead, pages are temporarily mapped and unmapped here so that virtual and physical addresses in this range have no consistent mapping. Typical uses of HIGHMEM include single-time data buffers."}
+{"doc_id": 101574, "author": "gnasher729", "text": "It is not (yet) supported, and there is a good reason. An iPhone 6 in Zoom mode has a screen just like an iPhone 5. An iPhone 6+ in Zoom mode has a screen just like an iPhone 6. An iPhone X in Zoom mode would be another new screen size, with a new size of the notch. (The notch would physically be the same size, but because of zoom mode it would be fewer pixels.) At the moment there are still many apps that support the iPhone X, but in a slightly hacked way: if the developer didn't want to switch to a newer SDK, they could check the size of the phone, and if it is iPhone X sized then the developer assumes it has an iPhone X sized notch and an iPhone X sized area for the home button at the bottom. These developers wouldn't think of supporting iPhone X in zoom mode, so things wouldn't work. Another problem is that as long as an app runs fine on iPhone 5, it will run fine on iPhone 6 in zoom mode, so I can test on an iPhone 5, which I need to test anyway. To test iPhone X in zoom mode, I would have to switch zoom mode on, which is a pain because it restarts all apps."}
+{"doc_id": 363723, "author": "villasv", "text": "A dangerous one-liner that doesn't require source: export $(xargs