| -rw-r--r-- | SI/.obsidian/app.json | 3 |
| -rw-r--r-- | SI/.obsidian/plugins/recent-files-obsidian/data.json | 12 |
| -rw-r--r-- | SI/.obsidian/workspace.json | 13 |
| -rw-r--r-- | SI/Dashboard.md | 11 |
| -rw-r--r-- | SI/Notes/1735275859-resource.md | 8 |
| -rw-r--r-- | SI/Resource/Data Science/Machine Learning/Contents/Bias and Variance.md | 6 |
| -rw-r--r-- | SI/Resource/Data Science/Machine Learning/Machine Learning.md | 24 |
7 files changed, 32 insertions, 45 deletions
diff --git a/SI/.obsidian/app.json b/SI/.obsidian/app.json
index 9fc5682..eaddc23 100644
--- a/SI/.obsidian/app.json
+++ b/SI/.obsidian/app.json
@@ -21,5 +21,6 @@
   "propertiesInDocument": "source",
   "spellcheck": true,
   "autoPairMarkdown": false,
-  "strictLineBreaks": false
+  "strictLineBreaks": false,
+  "useMarkdownLinks": true
 }
\ No newline at end of file
diff --git a/SI/.obsidian/plugins/recent-files-obsidian/data.json b/SI/.obsidian/plugins/recent-files-obsidian/data.json
index 8627ee1..ca80f7c 100644
--- a/SI/.obsidian/plugins/recent-files-obsidian/data.json
+++ b/SI/.obsidian/plugins/recent-files-obsidian/data.json
@@ -1,14 +1,18 @@
 {
   "recentFiles": [
     {
-      "basename": "Dashboard",
-      "path": "Dashboard.md"
+      "basename": "Bias and Variance",
+      "path": "Resource/Data Science/Machine Learning/Contents/Bias and Variance.md"
     },
     {
       "basename": "Machine Learning",
       "path": "Resource/Data Science/Machine Learning/Machine Learning.md"
     },
     {
+      "basename": "Dashboard",
+      "path": "Dashboard.md"
+    },
+    {
       "basename": "Gradient descent",
       "path": "Resource/Data Science/Machine Learning/Contents/Gradient descent.md"
     },
@@ -17,10 +21,6 @@
       "path": "Resource/Data Science/Machine Learning/Contents/Classification.md"
     },
     {
-      "basename": "Bias and Variance",
-      "path": "Resource/Data Science/Machine Learning/Contents/Bias and Variance.md"
-    },
-    {
       "basename": "SAA",
       "path": "Resource/AWS/SAA.md"
     },
diff --git a/SI/.obsidian/workspace.json b/SI/.obsidian/workspace.json
index 64b655c..6bbcaab 100644
--- a/SI/.obsidian/workspace.json
+++ b/SI/.obsidian/workspace.json
@@ -13,13 +13,13 @@
           "state": {
             "type": "markdown",
             "state": {
-              "file": "Dashboard.md",
+              "file": "Resource/Data Science/Machine Learning/Contents/Bias and Variance.md",
               "mode": "source",
               "backlinks": false,
               "source": true
             },
             "icon": "lucide-file",
-            "title": "Dashboard"
+            "title": "Bias and Variance"
           }
         }
       ]
@@ -77,8 +77,7 @@
             "title": "Recent Files"
           }
         }
-      ],
-      "currentTab": 1
+      ]
     }
   ],
   "direction": "horizontal",
@@ -195,14 +194,14 @@
       "periodic-notes:Open today": false
     }
   },
-  "active": "b1f7f5ee0151b994",
+  "active": "3d54370a0282bec3",
   "lastOpenFiles": [
+    "Resource/Data Science/Machine Learning/Machine Learning.md",
+    "Dashboard.md",
     "Resource/Data Science/Machine Learning/Contents/Gradient descent.md",
     "Resource/Data Science/Machine Learning/Contents/Classification.md",
     "Resource/Data Science/Machine Learning/Contents/Bias and Variance.md",
     "Resource/AWS/SAA.md",
-    "Dashboard.md",
-    "Resource/Data Science/Machine Learning/Machine Learning.md",
     "Resource/AWS",
     "Resource/Data Science/SQL/MySQL/MySQL.md",
     "Spaces/Home/Archive/Data_Science",
diff --git a/SI/Dashboard.md b/SI/Dashboard.md
index ea9865e..dcb8a56 100644
--- a/SI/Dashboard.md
+++ b/SI/Dashboard.md
@@ -9,32 +9,27 @@ cssclasses:
   - dashboard
   - dashboard-ReadLineLength
 ---
-
 # Dashboard
 
 - ### 🏠 [House]()
-
 - 💰 Budget
 	- [[Q1 2024]]
 - #### 🛒 Grocery
 - 💳 Transaction
-
 - ### 👤 [Personal]()
-
 - #### 🏡[Archive](file:////Users/si/Documents/SI/Archive)
 - #### ✍️ [Area](file:////Users/si/Documents/SI/Area)
 - #### 📁 [Projects](file:////Users/si/Documents/SI/Project)
 - #### 📚 [Resource](file:////Users/si/Documents/SI/Resource)
 - ✅ [To-do](file:////Users/si/Documents/SI/To-do)
-	`$=dv.list(dv.pages('"To-do"').sort(f=>f.file.name,"desc").limit(4).file.link)`
+	`$=dv.list(dv.pages('"To-do"').sort(f=>f.file.name,"desc").limit(4).file.link)`
 
 - ### 🏢 [School]()
-
 - 📔 [Class]()
-	`$=dv.list(dv.pages('"Resource"').sort(f=>f.file.mtime.ts,"desc").limit(4).file.link)`
+	`$=dv.list(dv.pages('"Resource"').sort(f=>f.file.mtime.ts,"desc").limit(4).file.link)`
 - 💼 [Project]()
 - ✏️ [Assignment]()
-	`$=dv.list(dv.pages('#assignment').sort(f=>f.file.mtime.ts,"desc").file.link)`
+	`$=dv.list(dv.pages('#assignment').sort(f=>f.file.mtime.ts,"desc").file.link)`
 
 - ### 🚧 Life Progress
 
diff --git a/SI/Notes/1735275859-resource.md b/SI/Notes/1735275859-resource.md
deleted file mode 100644
index 0638325..0000000
--- a/SI/Notes/1735275859-resource.md
+++ /dev/null
@@ -1,8 +0,0 @@
----
-id: 1735275859-resource
-aliases:
-  - Resource
-tags: []
----
-
-# Resource
diff --git a/SI/Resource/Data Science/Machine Learning/Contents/Bias and Variance.md b/SI/Resource/Data Science/Machine Learning/Contents/Bias and Variance.md
index 294f138..a3e6398 100644
--- a/SI/Resource/Data Science/Machine Learning/Contents/Bias and Variance.md
+++ b/SI/Resource/Data Science/Machine Learning/Contents/Bias and Variance.md
@@ -34,9 +34,9 @@ tags:
 
 - Solution
 	- Use validation data set
-		- $\bbox[teal,5px,border:2px solid red]{\text{Train data (80\%)+ Valid data (10\%) + Test data (10\%)}}$
-		- Cannot directly participate in model training
-		- Continuously evaluates in the learning base, and stores the best existing performance
+		- $\bbox[teal,5px,border:2px solid red]{\text{Train data (80\%)+ Valid data (10\%) + Test data (10\%)}}$
+		- Cannot directly participate in model training
+		- Continuously evaluates in the learning base, and stores the best existing performance
	- K-fold cross validation
 		- **Leave-One-Out Cross-Validation (LOOCV)**
 			- a special case of k-fold cross-validation where **K** is equal to the number of data points in the dataset.
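The Bias and Variance hunk above describes a held-out validation set (train 80%, valid 10%, test 10%) that never participates in training, and LOOCV as k-fold cross-validation with K equal to the number of samples. The sketch below illustrates those two ideas only; it is not part of this commit, and scikit-learn and NumPy are assumed dependencies rather than anything the vault references.

```python
# Illustrative only: not part of the diff above. Shows a train/valid/test split
# and LOOCV as the K = n_samples special case of k-fold, per the note's wording.
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import KFold, LeaveOneOut, cross_val_score, train_test_split

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 3))
y = X @ np.array([1.5, -2.0, 0.5]) + rng.normal(scale=0.1, size=100)

# 80/10/10 split: the validation set is held out from training and only scored.
X_train, X_tmp, y_train, y_tmp = train_test_split(X, y, test_size=0.2, random_state=0)
X_valid, X_test, y_valid, y_test = train_test_split(X_tmp, y_tmp, test_size=0.5, random_state=0)

model = LinearRegression().fit(X_train, y_train)
print("validation R^2:", model.score(X_valid, y_valid))

# K-fold cross-validation; LeaveOneOut is the special case K = len(X).
kfold_scores = cross_val_score(LinearRegression(), X, y, cv=KFold(n_splits=5))
loo_scores = cross_val_score(LinearRegression(), X, y, cv=LeaveOneOut(),
                             scoring="neg_mean_squared_error")
print("5-fold mean R^2:", kfold_scores.mean())
print("LOOCV mean MSE:", -loo_scores.mean())
```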
diff --git a/SI/Resource/Data Science/Machine Learning/Machine Learning.md b/SI/Resource/Data Science/Machine Learning/Machine Learning.md
index 6dbb5e8..3ee6924 100644
--- a/SI/Resource/Data Science/Machine Learning/Machine Learning.md
+++ b/SI/Resource/Data Science/Machine Learning/Machine Learning.md
@@ -32,7 +32,7 @@ Learning is to find the best model represented data, meaning optimization of par
 - A model with the smallest difference between predictions $\hat{y}$ and actual values $y$
 - A model parameter makes the smallest loss
 
-## Types of learning
+## Types of Learning
 
 ### Supervised Learning
 
@@ -46,9 +46,9 @@ Learning is to find the best model represented data, meaning optimization of par
 - [[Support Vector Machine]] ([[Support Vector Machine |SVM]])
 - [[Decision Tree]]
 - [[Linear Discriminant Analysis]] ([[Linear Discriminant Analysis |LDA]])
-	1. [[Ensemble]]
-		- [[Bagging]]
-		- [[Boosting]]
+	1. [[Ensemble]]
+		- [[Bagging]]
+		- [[Boosting]]
 
 ### Unsupervised Learning
 
@@ -65,11 +65,11 @@ Learning is to find the best model represented data, meaning optimization of par
 
 - Data Properties
 	- Features (= attributes, independent variables): X
-		- characteristics of data or items
-		- N: # of data sample
-		- D: # of features
+		- characteristics of data or items
+		- N: # of data sample
+		- D: # of features
 	- Label (dependent variables): y
-		- if there is a label, it is supervised. Otherwise, it is unsupervised
+		- if there is a label, it is supervised. Otherwise, it is unsupervised
 - Parameter (=weight): learnable parameters that a model have, not given data
 - [[Hyperparameter]]: parameters that human has to decide
 - Input vs. Output
@@ -77,11 +77,11 @@ Learning is to find the best model represented data, meaning optimization of par
 	- Output ($\hat{y}$): values of prediction derived from model
 - Linear vs. Nonlinear
 	- Linear regression: a model can be implemented by a linear function
-		- Simple Linear Regression: Involves two variables — one independent variable and one dependent variable. The relationship between these variables is modeled as a straight line.
-		- Multiple Linear Regression: Uses more than one independent variable to predict a dependent variable. The relationship is still linear in nature, meaning it assumes a straight-line relationship between each independent variable and the dependent variable.
-		- ex) $y = w_0 + w_1*x_1 + w_2*x_2 + \dots + w_D*x_D, y = w_0 + w_1*x_1 + w_2*x^2$
+		- Simple Linear Regression: Involves two variables — one independent variable and one dependent variable. The relationship between these variables is modeled as a straight line.
+		- Multiple Linear Regression: Uses more than one independent variable to predict a dependent variable. The relationship is still linear in nature, meaning it assumes a straight-line relationship between each independent variable and the dependent variable.
+		- ex) $y = w_0 + w_1*x_1 + w_2*x_2 + \dots + w_D*x_D, y = w_0 + w_1*x_1 + w_2*x^2$
 	- Non-linear regression: a model can't be implemented by a linear function
-		- ex) $log(y) = w_0 + w_1*log(x), y = max(x, 0)$
+		- ex) $log(y) = w_0 + w_1*log(x), y = max(x, 0)$
 
 ## Basic Math for ML
 
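The Machine Learning hunk above gives $y = w_0 + w_1*x_1 + w_2*x^2$ as an example that is still a linear model, because it is linear in the parameters $w$. The sketch below illustrates that point with ordinary least squares on a hand-built design matrix; it is not part of this commit, and NumPy is an assumed dependency.

```python
# Illustrative only: not part of the diff above. y = w0 + w1*x + w2*x^2 is linear
# in the parameters, so plain least squares fits it once [1, x, x^2] is built.
import numpy as np

rng = np.random.default_rng(1)
x = rng.uniform(-2.0, 2.0, size=200)
y = 0.5 + 1.0 * x + 2.0 * x**2 + rng.normal(scale=0.1, size=200)

# Design matrix: one column per term of the linear-in-parameters model.
X = np.column_stack([np.ones_like(x), x, x**2])

# Closed-form least-squares solution for w = (w0, w1, w2).
w, *_ = np.linalg.lstsq(X, y, rcond=None)
print("estimated weights:", w)  # close to (0.5, 1.0, 2.0)

# By contrast, y = max(x, 0) has no parameters entering linearly, which is
# what the note means by a non-linear model.
```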
