diff --git a/Amazon SageMaker/31st image.png b/Amazon SageMaker/31st image.png new file mode 100644 index 0000000..00795d4 Binary files /dev/null and b/Amazon SageMaker/31st image.png differ diff --git a/Amazon SageMaker/cluster1.png b/Amazon SageMaker/cluster1.png new file mode 100644 index 0000000..d39fd8e Binary files /dev/null and b/Amazon SageMaker/cluster1.png differ diff --git a/Amazon SageMaker/file imported.png b/Amazon SageMaker/file imported.png new file mode 100644 index 0000000..2cd402b Binary files /dev/null and b/Amazon SageMaker/file imported.png differ diff --git a/Amazon SageMaker/firstcluster.png b/Amazon SageMaker/firstcluster.png new file mode 100644 index 0000000..2c7ba80 Binary files /dev/null and b/Amazon SageMaker/firstcluster.png differ diff --git a/Amazon SageMaker/jupyter.png b/Amazon SageMaker/jupyter.png new file mode 100644 index 0000000..666ca0e Binary files /dev/null and b/Amazon SageMaker/jupyter.png differ diff --git a/Amazon SageMaker/lastcluster.png b/Amazon SageMaker/lastcluster.png new file mode 100644 index 0000000..349ce28 Binary files /dev/null and b/Amazon SageMaker/lastcluster.png differ diff --git a/Amazon SageMaker/model.png b/Amazon SageMaker/model.png new file mode 100644 index 0000000..c6fa70e Binary files /dev/null and b/Amazon SageMaker/model.png differ diff --git a/Amazon SageMaker/new bucket.png b/Amazon SageMaker/new bucket.png new file mode 100644 index 0000000..5d98dd0 Binary files /dev/null and b/Amazon SageMaker/new bucket.png differ diff --git a/Amazon SageMaker/output and data.png b/Amazon SageMaker/output and data.png new file mode 100644 index 0000000..2a3c590 Binary files /dev/null and b/Amazon SageMaker/output and data.png differ diff --git a/Amazon SageMaker/request.png b/Amazon SageMaker/request.png new file mode 100644 index 0000000..a1d4fad Binary files /dev/null and b/Amazon SageMaker/request.png differ diff --git a/Amazon SageMaker/runcode1.png b/Amazon SageMaker/runcode1.png new file mode 100644 
index 0000000..f390aad Binary files /dev/null and b/Amazon SageMaker/runcode1.png differ diff --git a/Amazon SageMaker/runcode2.png b/Amazon SageMaker/runcode2.png new file mode 100644 index 0000000..db0201e Binary files /dev/null and b/Amazon SageMaker/runcode2.png differ diff --git a/Amazon SageMaker/runcode3.png b/Amazon SageMaker/runcode3.png new file mode 100644 index 0000000..434d476 Binary files /dev/null and b/Amazon SageMaker/runcode3.png differ diff --git a/Amazon SageMaker/runcode4.png b/Amazon SageMaker/runcode4.png new file mode 100644 index 0000000..f5a6393 Binary files /dev/null and b/Amazon SageMaker/runcode4.png differ diff --git a/Amazon SageMaker/training completed.png b/Amazon SageMaker/training completed.png new file mode 100644 index 0000000..0dc54d2 Binary files /dev/null and b/Amazon SageMaker/training completed.png differ diff --git a/Intro to S3/Eiffel.jpg b/Intro to S3/Eiffel.jpg new file mode 100644 index 0000000..91f994e Binary files /dev/null and b/Intro to S3/Eiffel.jpg differ diff --git a/Intro to S3/Screen Shot 2018-10-28 at 2.15.38 AM.png b/Intro to S3/Screen Shot 2018-10-28 at 2.15.38 AM.png new file mode 100644 index 0000000..94c3f78 Binary files /dev/null and b/Intro to S3/Screen Shot 2018-10-28 at 2.15.38 AM.png differ diff --git a/Intro to S3/Sheep.jpg b/Intro to S3/Sheep.jpg new file mode 100644 index 0000000..c11f469 Binary files /dev/null and b/Intro to S3/Sheep.jpg differ diff --git a/Intro to S3/access_denied.png b/Intro to S3/access_denied.png new file mode 100644 index 0000000..1a64079 Binary files /dev/null and b/Intro to S3/access_denied.png differ diff --git a/Intro to S3/configure_bucket.png b/Intro to S3/configure_bucket.png new file mode 100644 index 0000000..b1a3c1f Binary files /dev/null and b/Intro to S3/configure_bucket.png differ diff --git a/Intro to S3/create_bucket.png b/Intro to S3/create_bucket.png new file mode 100644 index 0000000..d2a8e41 Binary files /dev/null and b/Intro to S3/create_bucket.png 
differ diff --git a/Intro to S3/have_access.png b/Intro to S3/have_access.png new file mode 100644 index 0000000..7fb811c Binary files /dev/null and b/Intro to S3/have_access.png differ diff --git a/Intro to S3/no_access.png b/Intro to S3/no_access.png new file mode 100644 index 0000000..8d594b6 Binary files /dev/null and b/Intro to S3/no_access.png differ diff --git a/Intro to S3/permissions.png b/Intro to S3/permissions.png new file mode 100644 index 0000000..59ca9cb Binary files /dev/null and b/Intro to S3/permissions.png differ diff --git a/Intro to S3/policy.png b/Intro to S3/policy.png new file mode 100644 index 0000000..f39784e Binary files /dev/null and b/Intro to S3/policy.png differ diff --git a/Intro to S3/policy_generated.png b/Intro to S3/policy_generated.png new file mode 100644 index 0000000..52f7d2b Binary files /dev/null and b/Intro to S3/policy_generated.png differ diff --git a/Intro to S3/policy_json.png b/Intro to S3/policy_json.png new file mode 100644 index 0000000..522ba9d Binary files /dev/null and b/Intro to S3/policy_json.png differ diff --git a/Intro to S3/review.png b/Intro to S3/review.png new file mode 100644 index 0000000..3ccae96 Binary files /dev/null and b/Intro to S3/review.png differ diff --git a/Intro to S3/set_permission.png b/Intro to S3/set_permission.png new file mode 100644 index 0000000..2fa29ad Binary files /dev/null and b/Intro to S3/set_permission.png differ diff --git a/Intro to S3/upload_file.png b/Intro to S3/upload_file.png new file mode 100644 index 0000000..43a91fa Binary files /dev/null and b/Intro to S3/upload_file.png differ diff --git a/Intro to S3/uploaded_image.png b/Intro to S3/uploaded_image.png new file mode 100644 index 0000000..66c2970 Binary files /dev/null and b/Intro to S3/uploaded_image.png differ diff --git a/Intro to S3/version1.png b/Intro to S3/version1.png new file mode 100644 index 0000000..201174c Binary files /dev/null and b/Intro to S3/version1.png differ diff --git a/Intro to 
S3/version2.png b/Intro to S3/version2.png new file mode 100644 index 0000000..e9e15e4 Binary files /dev/null and b/Intro to S3/version2.png differ diff --git a/Intro to S3/version_json.png b/Intro to S3/version_json.png new file mode 100644 index 0000000..d4120ba Binary files /dev/null and b/Intro to S3/version_json.png differ diff --git a/Intro to S3/versions.png b/Intro to S3/versions.png new file mode 100644 index 0000000..760b193 Binary files /dev/null and b/Intro to S3/versions.png differ diff --git a/README.md b/README.md index 37d2d46..51ad374 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,283 @@ # Distributed Systems Practice Notes from learning about distributed systems in [GW CS 6421](https://gwdistsys18.github.io/) with [Prof. Wood](https://faculty.cs.gwu.edu/timwood/) -## Area 1 -> Include notes here about each of the links +## Big Data and Machine Learning +### Video: Hadoop Intro (45min) +Challenges of Distributed Systems +- High chances of system failure +- Limit on bandwidth +- High programming complexity +Solution: Hadoop! -## Area 2 -> Include notes here about each of the links +Four key characteristics of Hadoop: +Economical: Ordinary computers can be used for data processing +Reliable: Stores copies of the data on different machines and is resistant to hardware failure +Scalable: can follow both horizontal and vertical scaling +Flexible: can store as much of the data and decide to use it later +Hadoop Distributed File System +- A storage layer for Hadoop +- Suitable for the distributed storage and processing +- Hadoop provides a command line interface to interact with HDFS +- Streaming access to file system data +- Provides file permissions and authentication + +HBase stores data in HDFS. It is a NoSQL database or non-relational database. It is mainly used when you need random, real-time, read/write access to your Big Data. It provides support to high volume of data and high throughput. The table can have thousands of columns. 
+ +Sqoop is a tool designed to transfer data between Hadoop and relational database servers. It is used to import data from relational databases such as, Oracle and MySQL to HDFS and export data from HDFS to relational databases. + +Flume is a distributed service for ingesting streaming data. It is ideally suited for event data from multiple systems. + +Spark is an open-source cluster computing framework. It provides 100 times faster performance as compared to MapReduce. It supports Machine learning, Business intelligence, Streaming and Batch processing. + +Hadoop MapReduce is the original Hadoop processing engine which is primarily Java based. It is based on the map and reduce programming model. + +Pig is an open-source dataflow system. It converts pig script to Map-Reduce code. It is an alternate to writing Map-Reduce code. Best for ad-hoc queries like join and filter. + +Impala is a high performance SQL engine which runs on Hadoop cluster. It is ideal for interactive analysis. It has very low latency and it supports a dialect of SQL. + +Hive is best for data processing and ETL. It executes queries using MapReduce. + +Cloudera Search is a near-real-time access product. It enables non-technical users to search and explore data stored in or ingested into Hadoop and HBase. Users do not need SQL or programming skills to use Cloudera Search. It is a fully integrated data processing platform. + +Oozie is a workflow or coordination system used to manage the Hadoop jobs. + +Hue is an acronym for Hadoop User Experience. It is an open source Web interface for analyzing data with Hadoop. It provides SQL editors for Hive, Impala, MySQL, Oracle, PostgreSQL, Spark SQL, and Solr SQL. 
+ +Four stages of big data processing: +- Ingest +- Processing +- Analyze +- Access + +### QwikLab: Analyze Big Data with Hadoop (80min) +- Create a bucket with S3 service to store log files and output data +- Launch a Hadoop cluster to help with processing data +- Add Hive Script as a step in the cluster to process sample data +- Run the Hive Script and check result in S3 bucket when it is completed +- download the 000000_0 file to local computer to check the output + +### QwikLab: Intro to S3 (25min) +- Create a bucket and configure to allow versioning and set permissions +- upload an image to the bucket +- change the permission setting and make the image public +- create a bucket policy to set permission setting +- get access to different versions of images with the same name + +### QwikLab: Intro to Amazon Redshift (30min) +- Amazon Redshift is a fast, fully managed data warehouse. +- launch a Redshift cluster and configure +- use Pgweb as the SQL interface to Redshift +- create a table by executing SQL commands in Redshift +- import data from S3 and run the Query + +### Video: Short AWS Machine Learning Overview (5min) +Three layers of machine learning: +- Framework and interfaces +- Machine learning platforms +- Application services + +### AWS Tutorial: Analyze Big Data with Hadoop (80min) +The getting started part of this tutorial is the same as the QwikLab analyze big data with hadoop. +The function of the Hive Script: +``` +- Creates a Hive table schema named cloudfront_logs. For more information about Hive tables, see the Hive Tutorial on the Hive wiki +- Uses the built-in regular expression serializer/deserializer (RegEx SerDe) to parse the input data and apply the table schema. 
For more information, see SerDe on the Hive wiki +- Runs a HiveQL query against the cloudfront_logs table and writes the query results to the Amazon S3 output location that you specify +``` +### QwikLab: Intro to Amazon Machine Learning (40min) +- create a bucket and upload training data to the bucket +- create a datasource with Amazon machine learning that refers to the training data in the S3 bucket +- create a model from the datasource +- evaluate the generated model +- generate predictions from the machine learning model + +### Docs: AWS Machine Learning (60min) +Datasources: A datasource is an object that contains metadata about your input data. It does not store a copy of your input data. Instead, it stores a reference to the Amazon S3 location where your input data resides. It is used to train and evaluate machine learning model and generate predictions. +Machine Learning Models: A Machine Learning model is a mathematical model that generates predictions by finding patterns in your data. +Evaluations: An evaluation measures the quality of your ML model and determines if it is performing well. +Batch Predictions: Batch predictions are for a set of observations that can run all at once. +Real-time Predictions: Real-time predictions are for applications with a low latency requirement, such as interactive web, mobile, or desktop applications. + +### AWS Tutorial: Build a Machine Learning Model (60min) +- Download the data file banking.csv and banking-batch.csv. In the csv file we can see attribute y as a binary value. Create an S3 bucket and upload the files. +- Create a training datasource and establish the schema. Select y as the target attribute. +- Create an machine learning model. +- Review the performance of the model and set a score threshold. The default was 0.5 and adjust it so that 3% of the records are predicted as "1". Then save it as 0.77. 
+- Create predictions with the model with real-time predictions and download the prediction file to local computer. + +### Video Tutorial: Overview of AWS SageMaker (40min) +This tutorial introduced SageMaker with a few examples. + +Sagemaker console: +Notebook instance: explore AWS data in your notebooks, and use algorithms to create models via training jobs +Jobs: track training jobs at your desk or remotely. Leverage high-performance AWS algorithms +Models: create models for hosting from job outputs, or import externally trained models into Amazon SageMaker. +Endpoint: deploy endpoints for developers to use in production. A/B test model variants via an endpoint + +The four parts and be linked to be used together but they can also be used separately. + +### AWS Tutorial: AWS SageMaker (80min) +- Create an IAM Administrator User. +- Create a SageMaker Notebook Instance and open the Jupyter dashboard. +- Create a Jupyter notebook to run the code. Download the dataset and display one of the image in the dataset. +- Transform the dataset. There are two ways to do it. I chose to use the high-level Python library provided by Amazon SageMaker. +- Train the model with an algorithm. +- Validate the model with sending the requests. Get the references of the images. The model would divide the images I requested into clusters and each cluster contains similar images. + +### Build a Serverless Real-Time Data Processing App (150min) +#### Module 1 Build a data stream +- Use the Amazon Kinesis Data Streams console to create a new stream named wildrydes with 1 shard +- Produce messages into the stream with the command-line producer +``` +./producer +``` +- Print the messages being sent by the producer with consumer. +``` +./consumer +``` +- Create an Amazon Cognito identity pool to grant unauthenticated users access to read from the Kinesis stream. +- Add a new policy to the unauthenticated role to allow the dashboard to read from the stream to plot the unicorns on the map. 
+- Open Unicorn Dashboard and there is a unicore on the real-time map. + +#### Module 2 Aggregate data +- Use the Amazon Kinesis Data Streams console to create a new stream named wildrydes-summary with 1 shard +- Build an Amazon Kinesis Data Analytics application which reads from the wildrydes stream built in the previous module and emits a JSON object with Name, StatusTime, Distance, MinMagicPoints, MaxMagicPoints, MinHealthPoints, MaxHealthPoints each minute. +- Start the producer and create the schema. Run following SQL code and see rows arrive every minute: +``` +CREATE OR REPLACE STREAM "DESTINATION_SQL_STREAM" ( + "Name" VARCHAR(16), + "StatusTime" TIMESTAMP, + "Distance" SMALLINT, + "MinMagicPoints" SMALLINT, + "MaxMagicPoints" SMALLINT, + "MinHealthPoints" SMALLINT, + "MaxHealthPoints" SMALLINT +); + +CREATE OR REPLACE PUMP "STREAM_PUMP" AS + INSERT INTO "DESTINATION_SQL_STREAM" + SELECT STREAM "Name", "ROWTIME", SUM("Distance"), MIN("MagicPoints"), + MAX("MagicPoints"), MIN("HealthPoints"), MAX("HealthPoints") + FROM "SOURCE_SQL_STREAM_001" + GROUP BY FLOOR("SOURCE_SQL_STREAM_001"."ROWTIME" TO MINUTE), "Name"; +``` + +#### Module 3 Process streaming data +- Use the Amazon DynamoDB console to create a new DynamoDB table. The name of the table is UnicornSensorData. Partition key: Name +Type: String +Sort key: StatusTime +type: String +- Create an IAM role for the Lambda function. +Name: WildRydesStreamProcessoerRole +- Create a Lambda function. 
+name: WildRydesStreamProcessor +Environment variable: key TABLE_NAME value UnicornSensorData +use JavaScript code: +``` +'use strict'; + +const AWS = require('aws-sdk'); +const dynamoDB = new AWS.DynamoDB.DocumentClient(); +const tableName = process.env.TABLE_NAME; + +exports.handler = function(event, context, callback) { + const requestItems = buildRequestItems(event.Records); + const requests = buildRequests(requestItems); + + Promise.all(requests) + .then(() => callback(null, `Delivered ${event.Records.length} records`)) + .catch(callback); +}; + +function buildRequestItems(records) { + return records.map((record) => { + const json = Buffer.from(record.kinesis.data, 'base64').toString('ascii'); + const item = JSON.parse(json); + + return { + PutRequest: { + Item: item, + }, + }; + }); +} + +function buildRequests(requestItems) { + const requests = []; + + while (requestItems.length > 0) { + const request = batchWrite(requestItems.splice(0, 25)); + + requests.push(request); + } + + return requests; +} + +function batchWrite(requestItems, attempt = 0) { + const params = { + RequestItems: { + [tableName]: requestItems, + }, + }; + + let delay = 0; + + if (attempt > 0) { + delay = 50 * Math.pow(2, attempt); + } + + return new Promise(function(resolve, reject) { + setTimeout(function() { + dynamoDB.batchWrite(params).promise() + .then(function(data) { + if (data.UnprocessedItems.hasOwnProperty(tableName)) { + return batchWrite(data.UnprocessedItems[tableName], attempt + 1); + } + }) + .then(resolve) + .catch(reject); + }, delay); + }); +} +``` +- Verify that the trigger is properly executing the Lambda function. View the metrics emitted by the function and inspect the output from the Lambda function. +- After running the producer with a unicorn name, we can query the DynamoDB table for data for a specific unicorn. + +#### Module 4 Store & query data +- Create an S3 bucket with the name wildrydes-data-xumo. 
+- Create an Amazon Kinesis Data Firehose delivery stream named wildrydes that is configured to source data from the wildrydes stream and deliver its contents in batches to the S3 bucket created previously. +- Create an Athena table to query the raw data in S3 bucket. +``` +CREATE EXTERNAL TABLE IF NOT EXISTS wildrydes ( + Name string, + StatusTime timestamp, + Latitude float, + Longitude float, + Distance float, + HealthPoints int, + MagicPoints int + ) + ROW FORMAT SERDE 'org.apache.hive.hcatalog.data.JsonSerDe' + LOCATION 's3://wildrydes-data-xumo/'; +``` + +## Cloud Web Apps +### AWS tutorial: Launch a VM (15min) +- open the Amazon EC2 console and launch instance +- configure the virtual machine, create a new key pair and launch it +- use chmod command to make sure the private key is not publicly viewable +``` +chmod 400 Downloads/MyKeyPair.pem +``` +- connect to the instance +``` +ssh -i Downloads/MyKeyPair.pem ec2-user@54.186.234.37 +``` + +### QwikLab: Intro to S3 (25min) +- Create a bucket and configure to allow versioning and set permissions +- upload an image to the bucket +- change the permission setting and make the image public +- create a bucket policy to set permission setting +- get access to different versions of images with the same name diff --git a/Real-time app/ unicorn on the map.png b/Real-time app/ unicorn on the map.png new file mode 100644 index 0000000..c18ef61 Binary files /dev/null and b/Real-time app/ unicorn on the map.png differ diff --git a/Real-time app/change settings.png b/Real-time app/change settings.png new file mode 100644 index 0000000..ccd3cd1 Binary files /dev/null and b/Real-time app/change settings.png differ diff --git a/Real-time app/cognito.png b/Real-time app/cognito.png new file mode 100644 index 0000000..1b6d227 Binary files /dev/null and b/Real-time app/cognito.png differ diff --git a/Real-time app/configure triggers.png b/Real-time app/configure triggers.png new file mode 100644 index 0000000..881db13 Binary 
files /dev/null and b/Real-time app/configure triggers.png differ diff --git a/Real-time app/consumer printng stream.png b/Real-time app/consumer printng stream.png new file mode 100644 index 0000000..7435a53 Binary files /dev/null and b/Real-time app/consumer printng stream.png differ diff --git a/Real-time app/consumer running.png b/Real-time app/consumer running.png new file mode 100644 index 0000000..d39b22e Binary files /dev/null and b/Real-time app/consumer running.png differ diff --git a/Real-time app/create function.png b/Real-time app/create function.png new file mode 100644 index 0000000..7d9ff87 Binary files /dev/null and b/Real-time app/create function.png differ diff --git a/Real-time app/create stream.png b/Real-time app/create stream.png new file mode 100644 index 0000000..29cfbe5 Binary files /dev/null and b/Real-time app/create stream.png differ diff --git a/Structure.png b/Structure.png new file mode 100644 index 0000000..d6e222b Binary files /dev/null and b/Structure.png differ diff --git a/Technical Report and Practice Round 2.md b/Technical Report and Practice Round 2.md new file mode 100644 index 0000000..1fc4c5a --- /dev/null +++ b/Technical Report and Practice Round 2.md @@ -0,0 +1,352 @@ +# Technical Report +## Extracting Windows 7 System Account Password with Virtual Machine and AWS EC2 + +I took Distributed Systems this semester and I learned a lot about virtual machines and Amazon Web Services. I also learned how to use Amazon EC2 in the practice including how to launch a virtual machine and how to connect to it. In this report I am going to introduce what I learned about virtualization and Amazon EC2 and how I used them to help with one of my assignments in Computer Network Defense class. + +### Virtualization +Traditionally we utilize computing and storage resource base on hardware. But with virtualization technology we can split a physical machine and each part can have its own operating system. 
It helps us to make full use of hardware resource and makes management easier. + +We put hypervisors between physical machine and virtual machines to divide physical resource. When we are using virtualization on servers we install hypervisors directly on the hardware but when we are using virtualization software such as Virtual Box or VMware on a PC, the hypervisors are on top of the original operating system. + +There are generally four types of virtualization. +1. Application Virtualization + + Application Virtualization can help separating a software from the computer. Users can run the software the same way as it is on top of the computer. For example, when we are writing program with Java Virtual Machine, we can have keyboard and mouse operations even we are running the program inside of the virtual machine. + +2. Hosted Virtualization + + With hosted virtualization, the hypervisor runs on top of the operating system. The hypervisor does not have direct access to hardware resource. For security some sensitive instructions can not be accessed by the virtual machine. The operating system of the computer would process them and send the result back to the virtual machine. + +3. Paravirtualization + + With Paravirtualization the operating system is modified and each guest operating system is aware that it is being virtualized. + +4. Full Virtualization + + Full Virtualization is also called hardware virtualization. As we know servers are using hardware virtualization. The hypervisor is directly installed on hardware and virtual machines can be accessed remotely. + +### Cloud Computing + +- Infrastructure as a Service + + To use IaaS, users rent raw servers and connect to it remotely. They can either launch virtual machines, set up operating systems, install applications or store data as they want. + +- Platform as a Service + + PaaS provides a programming platform where users can write their own programs to run them on the cloud. 
It can scale resource better but it requires users to write code with the API provided by the service. + +- Software as a Service + + SaaS has the most limited flexibility and the best scalability. It is simply a software that you can access through the internet instead of being installed on your local machine. Such as email service or online work station. Users do not have to worry anything about scalability or management. + +### Extract Windows 7 Password + +A few weeks ago I had an assignment from my Computer Network Defense class. Creating five accounts in a windows 7 or windows 10 system with passwords of varying strengths. The users should have increasingly more difficult passwords. The passwords should be protected by LMHashes. Then extract the passwords and crack them. + +To finish this assignment, I needed to enable LMHashes, create the accounts and set up passwords, extract the passwords hash file and crack the passwords. + +I do not own a computer with windows system so I used a virtual machine from Oracle Virual Box. I downloaded the iso of IE8-Windows 7 virtual machine from Microsoft official website and then loaded it in Virtual Box. Then I enabled LMHashes with Windows Registry Editor. + +- type “run” in search and then type “regedit” in the window to get in registry editor +- go to HKEY_LOCAL_MACHINE -> SYSTEM -> CurrentControlSet -> Control -> Lsa and set the value of NoLmHash 0 + +Once LMHashes is enabled, the accounts and passwords are encrypted. The length of password can not be over 15 characters. + +Create 5 accounts and passwords: + +username | user1 | user2 | user3 | user4 | user5 +------------ | ------------- | ------------- | ------------- | ------------- | ------------- | +password | abc | computer123 | XuMolovesC++ | xumo123!@# | 321XuMo!@# + +The encrypted users passwords are stored in a file named SAM under c:\Windows\system32\config directory. The file can not be opened or moved directly. So I needed to access it in another way. 
I used another virtual machine to boot this windows system and accessed the file through the interface of the other virtual machine. + +Before the next step I need to introduce the IDE Controller of Virtual Box. Usually the hard dist is built into the virtual machine and when a user accesses it, it presents as "real" storage of the machine. The idea of accessing the SAM file without going through the windows operating system is to attach the windows virtual machine to the kali linux as a hard drive. + +I downloaded kali-linux iso and I went to storage in settings of Windows 7 virtual machine. settings -> General -> Basic, change the Version to Other Windows (64 bit). And then I went to Settings -> Storage where I can set up the attibutes of IDE Controller. Set kali-linux as the IDE Primary Master and set the windows 7 as IDE Primary Slave. + +I opened the windows 7 virtual machine and it appeared as the interface of kali-linux. Now the hard disk of the Windows 7 virtual machine is a part of the hard disk of kali linux. I located the SAM file in media/root/Windows 7/Windows/systems32/cinfig. + +I used the terminal of kali linux to run pwdump on the SAM file and put the output in a txt tile named WinHash.txt. 
+ +```pwdump SYSTEM SAM > /root/Desktop/WinHash.txt``` + +the content of WinHash.txt + +``` +Administrator:500:aad3b435b51404eeaad3b435b51404ee:fc525c9683e8fe067095ba2ddc971889::: +Guest:501:aad3b435b51404eeaad3b435b51404ee:31d6cfe0d16ae931b73c59d7e0c089c0::: +IEUser:1000:aad3b435b51404eeaad3b435b51404ee:fc525c9683e8fe067095ba2ddc971889::: +sshd:1001:aad3b435b51404eeaad3b435b51404ee:31d6cfe0d16ae931b73c59d7e0c089c0::: +sshd_server:1002:aad3b435b51404eeaad3b435b51404ee:8d0a16cfc061c3359db455d00ec27035::: +user1:1008:8c6f5d02deb21501aad3b435b51404ee:e0fba38268d0ec66ef1cb452d5885e53::: +user2:1009:ae6e1b1fccb24d5b65fa44e2fc64e931:7d0305bc1509a6b5cdf89c5ff92b2a5b::: +user3:1010:9677f4a0eebc005cd834de725e83d830:e947f43d95b5bcbc367ab8d81b23cf2a::: +user4:1011:e9739fe1a9e85af587c394d748ef4541:ff13d5238f9d1eb8c683a42b5143f3b5::: +user5:1012:64a75169bfeba9f987c394d748ef4541:1e4da16751efa2c6538c0c721fd0259f::: +``` + +Then I tranfered the WinHash.txt to local computer through email. + +### Amazon EC2 + +I planned on using John the Ripper to crack the password. Since some passwords were complex and it might have to run for a long time. So I wanted to use cloud service to run it remotely so that I didn't have to keep my own computer running for a long time. I chose to use Amazon EC2 to launch a virtual machine to run the password cracking software. + +Amazon EC2 is a kind of Infrastructure as a Service. It provides different sizes of RAM and computing resource. +Users can launch a virtual machine with EC2. It provides some ready-to-go operating systems and distributions such as Red Hat, Amazon Linux, Ubuntu, Microsoft Windows Server, etc. It provides a collection of configuration so that users can use it as they desire. + +I used an ubuntu instance on AWS. Configuration was t2.micro, 1 virtual CPU and memory of 1GiB. I created a keypair and named it XuMoKeyPair.pem. + +Process: + +First I uploaded the WinHash.txt to the ubuntu virtual machine. 
+ +```scp -I XuMoKeyPair.pem WinHash.txt ubuntu@ec2-54-1-72-1-37.compute_1.amazonaws.com:WinHash.txt``` + +![upload the file](https://github.com/XuMo1995/dist-sys-practice/blob/master/Technical%20Report/upload%20the%20file%20to%20EC2.png) + +Then I connected to the virtual machine. + +```ssh -I “XuMoKeyPair.pem” ubuntu@ec2-54-172-1-37.compute-1.amazonaws.com``` + +![connect](https://github.com/XuMo1995/dist-sys-practice/blob/master/Technical%20Report/connect%20to%20EC2.png) + +I installed john the ripper with apt-get. + +```sudo apt-get install john``` + +![install john](https://github.com/XuMo1995/dist-sys-practice/blob/master/Technical%20Report/install%20john.png) + +After john the ripper was installed properly I started cracking the password. + +```john WinHash.txt``` + +Checked the result once it was finished. + +```john WinHash.txt --show``` + +![cracked passwords](https://github.com/XuMo1995/dist-sys-practice/blob/master/Technical%20Report/show%20result.png) + + + + + + + + +# Practice + +# Dockers and Containers (beginner) + +## Why docker? (20min) +Docker is fast +- Implementing and maintaining a software is faster on Docker + +Docker can be adopted quickly +- Every system, software and function runs the same way on Docker as on its original environment therefore no code changing is needed while transferring them to Docker +Docker has saved software developers and maintainers a lot of time because it is fast and it saves a lot of work during the life cycle of software developing. + +## Lab: DevOps Docker Beginners Guide (60min) + +What happens when you run Hello World: + +When you run the command “docker container run hello-world”, the engine would start trying to find the image with the name hello-world locally. Since the engine couldn’t find it, it would go to the default Docker Registry to keep looking for it. The engine would find the image and run it in a container. 
+ +Comparing a virtual machine and a container: + +Virtual Machine | Container +------------ | ------------- +Hardware Abstraction| Application Abstraction +A virtual machine has a full OS | Multiple containers share OS kernel + +Docker images + +Commands I learned: +- docker image pull – fetches a certain image and save it +- docker image ls – shows all images in the system +- docker container run – runs a container +- docker container run alpine echo “hello from alpine” – gets the output “hello from alpine” +- docker container run alpine /bin/sh – runs and then exit a shell +- docker container ls – shows all the running containers +- docker container ls -a – shows all the containers you ran + +# Cloud Web Apps (intermediate) + +## Video: virtualization (15min) +- Starting from the 1970s, invented by IBM +- The idea of kernel mode/supervisor mode: run applications on top of OS and make OS have more access to the machine so that applications would not cause the machine to crash +- Xen virtual machine: open-source and popular +- Hypervisor: accesses the physical device + + +## AWS Tutorial: Install a LAMP Web Server on Amazon Linux 2 (40min) +- Preparation: + - a new Amazon Linux 2 instance + - security group configuration: SSH (port 22), HTTP (port 80), HTTPS (port 443) + - update software packages ```sudo yum update -y ``` +- Install LAMP and Apache web server + - install lamp-mariadb10.2-php7.2 and php7.2 Amazon Linux Extras repositories ```sudo amazon-linux-extras install -y lamp-mariadb10.2-php7.2 php7.2``` + - install Apache web server and MariaDB ```sudo yum install -y httpd mariadb-server``` + - start httpd + - set file permissions to allow user to add, delete and edit files +- Test the LAMP server: + - Create a PHP file in the Apache document root. Type the URL of the file in a browser there should be a PHP information page if the server is running correctly and the PHP file was created properly. 
+ +- Secure the database server: + - Set a password for the root account + - Remove the insecure features, including anonymous user accounts, remote root login and the test database + - Reload the privilege tables + +## On your own: compare the performance, functionality and price when serving web content from S3 versus an EC2 VM (10min) +components | S3 | EC2 | comment +------------ | ------------- | ------------- | ------------- +performance | Fast and reliable | Manually scalable | +price | $0.023 per GB within first 50 TB $0.022 per GB 50 – 500 TB $0.021 per GB over 500 TB | From $0.025 per GB to $0.10 per GB according to storage type | S3 is a lot cheaper than EC2 +functionality | Better for static content | Better for running programs | + +## QwikLab: Intro to DynamoDB (30min) +Amazon DynamoDB is a NoSQL database service which provides both document and key-value data models. +This lab shows how to create a table in Amazon DynamoDB and enter data into the table, then query the table and delete it. + +Create a table: +- Each table has to have a Primary Key. It can also have a Sort Key. +Add data to the table: +- Structure: Data is stored in tables. A table contains items. An item is a collection of attributes. 
+- This is how I understand it: + +DynamoDB | Other database systems +------------ | ------------- +table | table +items | rows +attributes | columns + +Query the table – query or scan +- Query is much faster than scan +Delete the table + +## Deploy a Node.js Web App (80min) +- Tools and technologies we are going to use: + - AWS Elastic Beanstalk + - Amazon DynamoDB + - Node.js + - Express + - NoSQL database + +- launch an Elastic Beanstalk environment with the following configuration: + - Platform: Node.js + - Application code: Sample Code + - Others: default settings +- Add permissions to the environment’s instances + - What the permissions are for: when a request comes in, the application accesses the services with these permissions + - The policies for this application: AmazonDynamoDBFullAccess and AmazonSNSFullAccess +- Deploy the sample application + - This tutorial provides a source bundle; we only need to download it, then upload and deploy it on the Elastic Beanstalk console + - It collects contact information from users and stores it in a DynamoDB table + - We can add data to it by signing up on the webpage +- Create a DynamoDB table and configure it + - Set email as the primary key, with values of type string + - Change the minimum instance number from 1 to 2 so that when there is a failure in one of the instances the application can still be available + +## Intro to AWS Lambda (30min) +In this lab we learn how to create a serverless application with AWS Lambda and AWS S3. + +Background knowledge and terminologies: +- Lambda is a compute service on which we can build serverless applications. It provides server management for us. We only pay for the actual computing time +- Blueprints: code templates for some standard functions + +Process of the lab and takeaways: + +First we create an input bucket and an output bucket in AWS S3 and upload the test picture to the input bucket. Then we create a Lambda function and the trigger. 
It is a serverless service so it only runs when it is triggered. That is why we need the trigger. We configure it to use Object Created as the event type and the image we just uploaded to the input bucket as the object. + +Then we create a Thumbnail file for the function, which resizes the picture and uploads it to the output bucket. Then we can test the function by creating an event so that the function is triggered once we upload a new image to the bucket. + +Monitoring it and looking at the logs can be very helpful for troubleshooting and perfecting our function. On the monitoring console we can learn about the invocation count, execution time and the errors generated. + +We can use this information to scale and improve our function. For example, once we know the errors it generated, we can create replicas for better availability. + +## Intro to Amazon API Gateway (70min including looking at some other documents) +Background knowledge: + +- A microservice architecture breaks down a large and complex system into independent services so that it is easier to improve availability and make the system easier to maintain. +- API (application programming interface): it defines a collection of functionalities for developers. It shows how the components interact. +- API Gateway: a managed service that makes creating, deploying and maintaining APIs easy. + +Process of the lab: +- Create a Lambda function. +- Choose API Gateway when adding triggers +- Test the Lambda function + +Summary: + +This lab is similar to the previous intro to AWS Lambda lab. The focus is getting a better understanding of some of the components of AWS Lambda, and we learn about API Gateway. API Gateway holds APIs together to provide developers more control and make management a lot easier. We also learn about the concept of microservices in this lab. It is a structure that helps with increasing the availability and scalability of a system. 
+ + +## Build a serverless web application (180min) + +In previous practice of Big Data and Machine Learning I have learnt how to build a serverless real-time application using AWS Lambda, DynamoDB and S3. But this application involves some new functions of AWS such as Amazon Cognito User Pool and API Gateway. + +#### Process and takeaways + +Web Hosting: + +Amazon S3 is a good choice for storing static content of a website. It is cheap and it provides good performance. I have used Amazon S3 many times in previous practice so I used a template to build it. + +Manage Users + +Tool and Technology + +- Amazon Cognito user pool is a very helpful tool for handling authentication. Developers can use it to set up requirements for new users to register. In this lab, a user registers with an email address. Amazon Cognito then sends a confirmation email to that address with a verification code in it. A user can only sign in after verifying their identity. + +Implementation + +- Services -> mobile services -> Cognito +- manage your user pools -> create a new user pool -> name it WildRydes -> review defaults -> create pool +- General settings -> app clients -> add an app client -> name the app client WildRydesWebApp -> create app client without generating client secret +- download config.js and modify the IDs -> upload it to S3 +- Giddy up -> register -> let's ryde (I used a fake email in this lab so I verified manually) + +Serverless Service Backend + +Tool and Technology +- With a serverless service, the server is not always running. When a request comes in asking for a function, it handles that request in a container that runs that function. The same container is reused if there is another request for the same function. When another request asks for a different function, it opens another container for it. The container is shut down when it is killed or times out. + +- AWS Lambda - implement a Lambda function. 
When a user requests a unicorn this function is triggered + +- Amazon DynamoDB - the database + +Implementation: create a DynamoDB table -> create an IAM role -> create a function -> test + +RESTful APIs + +Tools and Technology + +- Amazon API Gateway -- exposes the Lambda function as an API which is accessible on the public internet + +- Amazon Cognito handles the authentication for the API + +Implementation: Create a New REST API -> Create a Cognito User Pools Authorizer -> create a new resource with the API -> create a POST method -> Deploy the API + +## Build a modern web application -- Mythical Mysfits (300min) + +Tools and Technology: + +![Structure Diagram](https://github.com/XuMo1995/dist-sys-practice/blob/master/Structure.png) + +- use AWS Cloud9 as the IDE and clone the project to it +- Amazon S3 -- we host the website in an S3 bucket and use a bucket policy to handle authentication +- AWS Fargate -- we use containers to build the microservice backend + - control and flexibility + - minimize maintenance +- Network Load Balancer -- listener, balancer, target group + - it handles connection requests + - distributes traffic to target groups + - connects users and AWS Fargate + - connects the internet and the service of the application +- AWS code services + - developers write programs in Cloud9 and push code to AWS CodeCommit + - AWS CodePipeline pulls changes from CodeCommit + - build the project in CodeBuild + - deploy in AWS Fargate +- AWS DynamoDB -- NoSQL database service that stores data +- AWS API Gateway -- between the user interface and the Network Load Balancer to handle user authentication +- AWS Lambda -- collects real-time user requests +- AWS Kinesis Data Firehose -- takes data records and puts them in an S3 bucket diff --git a/Technical Report/change windows version.png b/Technical Report/change windows version.png new file mode 100644 index 0000000..af882a9 Binary files /dev/null and b/Technical Report/change windows version.png differ diff --git a/Technical 
Report/connect to EC2.png b/Technical Report/connect to EC2.png new file mode 100644 index 0000000..e5d7b43 Binary files /dev/null and b/Technical Report/connect to EC2.png differ diff --git a/Technical Report/install john.png b/Technical Report/install john.png new file mode 100644 index 0000000..b113f03 Binary files /dev/null and b/Technical Report/install john.png differ diff --git a/Technical Report/primary master.png b/Technical Report/primary master.png new file mode 100644 index 0000000..a77f3c3 Binary files /dev/null and b/Technical Report/primary master.png differ diff --git a/Technical Report/primary slave.png b/Technical Report/primary slave.png new file mode 100644 index 0000000..dc07586 Binary files /dev/null and b/Technical Report/primary slave.png differ diff --git a/Technical Report/run john the ripper.png b/Technical Report/run john the ripper.png new file mode 100644 index 0000000..8b59bdb Binary files /dev/null and b/Technical Report/run john the ripper.png differ diff --git a/Technical Report/show result.png b/Technical Report/show result.png new file mode 100644 index 0000000..73e62f6 Binary files /dev/null and b/Technical Report/show result.png differ diff --git a/Technical Report/upload the file to EC2.png b/Technical Report/upload the file to EC2.png new file mode 100644 index 0000000..393ac13 Binary files /dev/null and b/Technical Report/upload the file to EC2.png differ diff --git a/analyze big data with hadoop/cluster configuration.png b/analyze big data with hadoop/cluster configuration.png new file mode 100644 index 0000000..1ffc2b4 Binary files /dev/null and b/analyze big data with hadoop/cluster configuration.png differ diff --git a/analyze big data with hadoop/cluster waiting.png b/analyze big data with hadoop/cluster waiting.png new file mode 100644 index 0000000..3d8888b Binary files /dev/null and b/analyze big data with hadoop/cluster waiting.png differ diff --git a/analyze big data with hadoop/create a bucket.png b/analyze big 
data with hadoop/create a bucket.png new file mode 100644 index 0000000..de20b96 Binary files /dev/null and b/analyze big data with hadoop/create a bucket.png differ diff --git a/analyze big data with hadoop/new cluster.png b/analyze big data with hadoop/new cluster.png new file mode 100644 index 0000000..13eedfc Binary files /dev/null and b/analyze big data with hadoop/new cluster.png differ diff --git a/analyze big data with hadoop/new step.png b/analyze big data with hadoop/new step.png new file mode 100644 index 0000000..0ce44e0 Binary files /dev/null and b/analyze big data with hadoop/new step.png differ diff --git a/analyze big data with hadoop/result.png b/analyze big data with hadoop/result.png new file mode 100644 index 0000000..7daf423 Binary files /dev/null and b/analyze big data with hadoop/result.png differ diff --git a/analyze big data with hadoop/review cluster.png b/analyze big data with hadoop/review cluster.png new file mode 100644 index 0000000..d470bfe Binary files /dev/null and b/analyze big data with hadoop/review cluster.png differ diff --git a/analyze big data with hadoop/step running.png b/analyze big data with hadoop/step running.png new file mode 100644 index 0000000..e6474bd Binary files /dev/null and b/analyze big data with hadoop/step running.png differ diff --git a/intro to RedShift screenshots/SQL.png b/intro to RedShift screenshots/SQL.png new file mode 100644 index 0000000..be6502e Binary files /dev/null and b/intro to RedShift screenshots/SQL.png differ diff --git a/intro to RedShift screenshots/city and count query.png b/intro to RedShift screenshots/city and count query.png new file mode 100644 index 0000000..0f52911 Binary files /dev/null and b/intro to RedShift screenshots/city and count query.png differ diff --git a/intro to RedShift screenshots/configure.png b/intro to RedShift screenshots/configure.png new file mode 100644 index 0000000..73c0afc Binary files /dev/null and b/intro to RedShift screenshots/configure.png differ 
diff --git a/intro to RedShift screenshots/count.png b/intro to RedShift screenshots/count.png new file mode 100644 index 0000000..0fc052a Binary files /dev/null and b/intro to RedShift screenshots/count.png differ diff --git a/intro to RedShift screenshots/lab.png b/intro to RedShift screenshots/lab.png new file mode 100644 index 0000000..33f024f Binary files /dev/null and b/intro to RedShift screenshots/lab.png differ diff --git a/intro to RedShift screenshots/labinfo.png b/intro to RedShift screenshots/labinfo.png new file mode 100644 index 0000000..1ba92cf Binary files /dev/null and b/intro to RedShift screenshots/labinfo.png differ diff --git a/intro to RedShift screenshots/launch cluster.png b/intro to RedShift screenshots/launch cluster.png new file mode 100644 index 0000000..8c19f99 Binary files /dev/null and b/intro to RedShift screenshots/launch cluster.png differ diff --git a/intro to RedShift screenshots/pgweb.png b/intro to RedShift screenshots/pgweb.png new file mode 100644 index 0000000..7e82701 Binary files /dev/null and b/intro to RedShift screenshots/pgweb.png differ diff --git a/intro to RedShift screenshots/query.png b/intro to RedShift screenshots/query.png new file mode 100644 index 0000000..b093671 Binary files /dev/null and b/intro to RedShift screenshots/query.png differ diff --git a/intro to RedShift screenshots/security group.png b/intro to RedShift screenshots/security group.png new file mode 100644 index 0000000..ff2490b Binary files /dev/null and b/intro to RedShift screenshots/security group.png differ diff --git a/intro to RedShift screenshots/select name.png b/intro to RedShift screenshots/select name.png new file mode 100644 index 0000000..10a8d7c Binary files /dev/null and b/intro to RedShift screenshots/select name.png differ diff --git a/intro to RedShift screenshots/select query.png b/intro to RedShift screenshots/select query.png new file mode 100644 index 0000000..056583d Binary files /dev/null and b/intro to RedShift 
screenshots/select query.png differ diff --git a/intro to RedShift screenshots/table created.png b/intro to RedShift screenshots/table created.png new file mode 100644 index 0000000..ea80c12 Binary files /dev/null and b/intro to RedShift screenshots/table created.png differ diff --git a/intro to RedShift screenshots/table.png b/intro to RedShift screenshots/table.png new file mode 100644 index 0000000..2b00d5d Binary files /dev/null and b/intro to RedShift screenshots/table.png differ diff --git a/intro to amazon machine learning/addstep.png b/intro to amazon machine learning/addstep.png new file mode 100644 index 0000000..63edd44 Binary files /dev/null and b/intro to amazon machine learning/addstep.png differ diff --git a/intro to amazon machine learning/data uploaded.png b/intro to amazon machine learning/data uploaded.png new file mode 100644 index 0000000..3863bbf Binary files /dev/null and b/intro to amazon machine learning/data uploaded.png differ diff --git a/intro to amazon machine learning/evaluation.png b/intro to amazon machine learning/evaluation.png new file mode 100644 index 0000000..5e523fd Binary files /dev/null and b/intro to amazon machine learning/evaluation.png differ diff --git a/intro to amazon machine learning/hive.png b/intro to amazon machine learning/hive.png new file mode 100644 index 0000000..0136898 Binary files /dev/null and b/intro to amazon machine learning/hive.png differ diff --git a/intro to amazon machine learning/input data.png b/intro to amazon machine learning/input data.png new file mode 100644 index 0000000..2caf711 Binary files /dev/null and b/intro to amazon machine learning/input data.png differ diff --git a/intro to amazon machine learning/model setting.png b/intro to amazon machine learning/model setting.png new file mode 100644 index 0000000..7104f6c Binary files /dev/null and b/intro to amazon machine learning/model setting.png differ diff --git a/intro to amazon machine learning/objects.png b/intro to amazon machine 
learning/objects.png new file mode 100644 index 0000000..fa9a996 Binary files /dev/null and b/intro to amazon machine learning/objects.png differ diff --git a/intro to amazon machine learning/output.png b/intro to amazon machine learning/output.png new file mode 100644 index 0000000..506ef19 Binary files /dev/null and b/intro to amazon machine learning/output.png differ diff --git a/intro to amazon machine learning/performance.png b/intro to amazon machine learning/performance.png new file mode 100644 index 0000000..0467429 Binary files /dev/null and b/intro to amazon machine learning/performance.png differ diff --git a/intro to amazon machine learning/prediction.png b/intro to amazon machine learning/prediction.png new file mode 100644 index 0000000..d4be2a2 Binary files /dev/null and b/intro to amazon machine learning/prediction.png differ diff --git a/intro to amazon machine learning/result.png b/intro to amazon machine learning/result.png new file mode 100644 index 0000000..4bf2ccf Binary files /dev/null and b/intro to amazon machine learning/result.png differ diff --git a/intro to amazon machine learning/resultofprediction.png b/intro to amazon machine learning/resultofprediction.png new file mode 100644 index 0000000..4ab2385 Binary files /dev/null and b/intro to amazon machine learning/resultofprediction.png differ diff --git a/intro to amazon machine learning/schema.png b/intro to amazon machine learning/schema.png new file mode 100644 index 0000000..d62ffc4 Binary files /dev/null and b/intro to amazon machine learning/schema.png differ diff --git a/launch a VM/configure instance.png b/launch a VM/configure instance.png new file mode 100644 index 0000000..7a18fec Binary files /dev/null and b/launch a VM/configure instance.png differ diff --git a/launch a VM/connect.png b/launch a VM/connect.png new file mode 100644 index 0000000..7b4d492 Binary files /dev/null and b/launch a VM/connect.png differ diff --git a/launch a VM/connected.png b/launch a 
VM/connected.png new file mode 100644 index 0000000..d57e5c3 Binary files /dev/null and b/launch a VM/connected.png differ diff --git a/launch a VM/keypair.png b/launch a VM/keypair.png new file mode 100644 index 0000000..92a1431 Binary files /dev/null and b/launch a VM/keypair.png differ diff --git a/launch a VM/launch.png b/launch a VM/launch.png new file mode 100644 index 0000000..cbd2439 Binary files /dev/null and b/launch a VM/launch.png differ diff --git a/launch a VM/privacy.png b/launch a VM/privacy.png new file mode 100644 index 0000000..060d21e Binary files /dev/null and b/launch a VM/privacy.png differ diff --git a/launch a VM/running.png b/launch a VM/running.png new file mode 100644 index 0000000..adec593 Binary files /dev/null and b/launch a VM/running.png differ diff --git a/launch a VM/selectVM.png b/launch a VM/selectVM.png new file mode 100644 index 0000000..0b180a1 Binary files /dev/null and b/launch a VM/selectVM.png differ diff --git a/machine learnin model/0.77.png b/machine learnin model/0.77.png new file mode 100644 index 0000000..4089f67 Binary files /dev/null and b/machine learnin model/0.77.png differ diff --git a/machine learnin model/3%.png b/machine learnin model/3%.png new file mode 100644 index 0000000..353b11e Binary files /dev/null and b/machine learnin model/3%.png differ diff --git a/machine learnin model/batch prediction in progress.png b/machine learnin model/batch prediction in progress.png new file mode 100644 index 0000000..190b234 Binary files /dev/null and b/machine learnin model/batch prediction in progress.png differ diff --git a/machine learnin model/batch prediction.png b/machine learnin model/batch prediction.png new file mode 100644 index 0000000..349b605 Binary files /dev/null and b/machine learnin model/batch prediction.png differ diff --git a/machine learnin model/create bucket.png b/machine learnin model/create bucket.png new file mode 100644 index 0000000..5b23beb Binary files /dev/null and b/machine learnin 
model/create bucket.png differ diff --git a/machine learnin model/data record.png b/machine learnin model/data record.png new file mode 100644 index 0000000..19fd364 Binary files /dev/null and b/machine learnin model/data record.png differ diff --git a/machine learnin model/evaluation.png b/machine learnin model/evaluation.png new file mode 100644 index 0000000..95f03da Binary files /dev/null and b/machine learnin model/evaluation.png differ diff --git a/machine learnin model/input data.png b/machine learnin model/input data.png new file mode 100644 index 0000000..daed311 Binary files /dev/null and b/machine learnin model/input data.png differ diff --git a/machine learnin model/machine learning model.png b/machine learnin model/machine learning model.png new file mode 100644 index 0000000..d682b40 Binary files /dev/null and b/machine learnin model/machine learning model.png differ diff --git a/machine learnin model/performance.png b/machine learnin model/performance.png new file mode 100644 index 0000000..75cb7e5 Binary files /dev/null and b/machine learnin model/performance.png differ diff --git a/machine learnin model/prediction result.png b/machine learnin model/prediction result.png new file mode 100644 index 0000000..eae0682 Binary files /dev/null and b/machine learnin model/prediction result.png differ diff --git a/machine learnin model/prediction.png b/machine learnin model/prediction.png new file mode 100644 index 0000000..636c26c Binary files /dev/null and b/machine learnin model/prediction.png differ diff --git a/machine learnin model/result.png b/machine learnin model/result.png new file mode 100644 index 0000000..4ef0a2e Binary files /dev/null and b/machine learnin model/result.png differ diff --git a/machine learnin model/schema.png b/machine learnin model/schema.png new file mode 100644 index 0000000..fc2deaf Binary files /dev/null and b/machine learnin model/schema.png differ