Dataset schema (one record per notebook cell below):
- path: string, length 13 to 17 characters
- screenshot_names: list, 1 to 873 items
- code: string, length 0 to 40.4k characters
- cell_type: string class, 1 distinct value ("code")
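Each record below follows this schema. As a minimal sketch of consuming such records, assuming they have been exported as JSON Lines to a hypothetical file `cells.jsonl` (the dataset's actual distribution format is not shown in this excerpt):

```python
import json

# Hypothetical file name: the real storage format of this dataset is not
# shown above. Each line is assumed to hold one record with the four
# schema fields: path, screenshot_names, code, cell_type.
with open("cells.jsonl", encoding="utf-8") as f:
    records = [json.loads(line) for line in f]

# Example query: keep only cells whose captured outputs include an image.
image_cells = [
    r for r in records
    if any(name.startswith("image_output") for name in r["screenshot_names"])
]
print(f"{len(image_cells)} of {len(records)} cells produced image output")
```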
path: 129000049/cell_45
screenshot_names: ["text_plain_output_1.png"]
code:
from sklearn.linear_model import LinearRegression from sklearn.linear_model import LinearRegression from sklearn.linear_model import LogisticRegression import numpy as np import pandas as pd wdf = pd.read_csv('/kaggle/input/historical-weather-data-for-indian-cities/jaipur.csv', parse_dates=['date_time'], index_col...
cell_type: code

path: 129000049/cell_18
screenshot_names: ["text_plain_output_1.png"]
code:
import pandas as pd wdf = pd.read_csv('/kaggle/input/historical-weather-data-for-indian-cities/jaipur.csv', parse_dates=['date_time'], index_col='date_time') wdf.columns wdf.shape wdf.isnull().any() wdf_num = wdf.loc[:, ['mintempC', 'tempC', 'HeatIndexC', 'pressure']] wdf_num.shape wdf_num.columns weth = wdf_nu...
cell_type: code

path: 129000049/cell_8
screenshot_names: ["text_plain_output_1.png"]
code:
import pandas as pd wdf = pd.read_csv('/kaggle/input/historical-weather-data-for-indian-cities/jaipur.csv', parse_dates=['date_time'], index_col='date_time') wdf.columns wdf.shape
cell_type: code

path: 129000049/cell_15
screenshot_names: ["text_plain_output_1.png"]
code:
import pandas as pd wdf = pd.read_csv('/kaggle/input/historical-weather-data-for-indian-cities/jaipur.csv', parse_dates=['date_time'], index_col='date_time') wdf.columns wdf.shape wdf.isnull().any() wdf_num = wdf.loc[:, ['mintempC', 'tempC', 'HeatIndexC', 'pressure']] wdf_num.shape
cell_type: code

path: 129000049/cell_38
screenshot_names: ["text_plain_output_1.png"]
code:
from sklearn.linear_model import LinearRegression from sklearn.linear_model import LinearRegression from sklearn.linear_model import LogisticRegression import numpy as np import pandas as pd wdf = pd.read_csv('/kaggle/input/historical-weather-data-for-indian-cities/jaipur.csv', parse_dates=['date_time'], index_col...
cell_type: code

path: 129000049/cell_17
screenshot_names: ["text_html_output_1.png"]
code:
import pandas as pd wdf = pd.read_csv('/kaggle/input/historical-weather-data-for-indian-cities/jaipur.csv', parse_dates=['date_time'], index_col='date_time') wdf.columns wdf.shape wdf.isnull().any() wdf_num = wdf.loc[:, ['mintempC', 'tempC', 'HeatIndexC', 'pressure']] wdf_num.shape wdf_num.columns
cell_type: code

path: 129000049/cell_31
screenshot_names: ["text_plain_output_1.png"]
code:
from sklearn.linear_model import LinearRegression from sklearn.linear_model import LinearRegression train_X.shape train_y.shape model = LinearRegression() model.fit(train_X, train_y)
cell_type: code

path: 129000049/cell_22
screenshot_names: ["text_html_output_1.png"]
code:
train_X.shape
cell_type: code

path: 1008454/cell_6
screenshot_names: ["text_plain_output_1.png", "image_output_1.png"]
code:
import matplotlib.pyplot as plt import pandas as pd gTemp = pd.read_csv('../input/GlobalTemperatures.csv') gTempCountry = pd.read_csv('../input/GlobalLandTemperaturesByCountry.csv') gTempState = pd.read_csv('../input/GlobalLandTemperaturesByState.csv') gTempMajorCity = pd.read_csv('../input/GlobalLandTemperaturesByMa...
cell_type: code

path: 1008454/cell_3
screenshot_names: ["text_html_output_1.png"]
code:
import pandas as pd gTemp = pd.read_csv('../input/GlobalTemperatures.csv') gTempCountry = pd.read_csv('../input/GlobalLandTemperaturesByCountry.csv') gTempState = pd.read_csv('../input/GlobalLandTemperaturesByState.csv') gTempMajorCity = pd.read_csv('../input/GlobalLandTemperaturesByMajorCity.csv') gTempCity = pd.read...
cell_type: code

path: 1008454/cell_5
screenshot_names: ["text_plain_output_1.png"]
code:
import pandas as pd gTemp = pd.read_csv('../input/GlobalTemperatures.csv') gTempCountry = pd.read_csv('../input/GlobalLandTemperaturesByCountry.csv') gTempState = pd.read_csv('../input/GlobalLandTemperaturesByState.csv') gTempMajorCity = pd.read_csv('../input/GlobalLandTemperaturesByMajorCity.csv') gTempCity = pd.read...
cell_type: code

path: 330673/cell_13
screenshot_names: ["text_plain_output_1.png"]
code:
from scipy.stats import chisquare from sklearn.decomposition import PCA from sklearn.preprocessing import MinMaxScaler import pandas as pd import pandas as pd df = pd.read_csv('../input/people.csv') from scipy.stats import chisquare chars = [i for i in df.columns.values if 'char_' in i] flags = [] for feat in df[c...
cell_type: code

path: 330673/cell_9
screenshot_names: ["text_plain_output_1.png"]
code:
from scipy.stats import chisquare from sklearn.decomposition import PCA from sklearn.preprocessing import MinMaxScaler import pandas as pd import pandas as pd df = pd.read_csv('../input/people.csv') from scipy.stats import chisquare chars = [i for i in df.columns.values if 'char_' in i] flags = [] for feat in df[c...
cell_type: code

path: 330673/cell_4
screenshot_names: ["text_plain_output_1.png", "image_output_1.png"]
code:
from scipy.stats import chisquare import pandas as pd import pandas as pd df = pd.read_csv('../input/people.csv') from scipy.stats import chisquare chars = [i for i in df.columns.values if 'char_' in i] flags = [] for feat in df[chars]: group = df[chars].groupby(feat) for otherfeat in df[chars].drop(feat, ax...
cell_type: code

path: 330673/cell_6
screenshot_names: ["text_plain_output_1.png", "image_output_1.png"]
code:
from scipy.stats import chisquare import pandas as pd import pandas as pd df = pd.read_csv('../input/people.csv') from scipy.stats import chisquare chars = [i for i in df.columns.values if 'char_' in i] flags = [] for feat in df[chars]: group = df[chars].groupby(feat) for otherfeat in df[chars].drop(feat, ax...
cell_type: code

path: 330673/cell_2
screenshot_names: ["text_plain_output_1.png", "image_output_1.png"]
code:
import pandas as pd import pandas as pd df = pd.read_csv('../input/people.csv') print(df.head())
cell_type: code

path: 330673/cell_11
screenshot_names: ["text_plain_output_1.png"]
code:
from scipy.stats import chisquare from sklearn.decomposition import PCA from sklearn.preprocessing import MinMaxScaler import pandas as pd import pandas as pd df = pd.read_csv('../input/people.csv') from scipy.stats import chisquare chars = [i for i in df.columns.values if 'char_' in i] flags = [] for feat in df[c...
cell_type: code

path: 330673/cell_7
screenshot_names: ["text_plain_output_1.png", "image_output_1.png"]
code:
from scipy.stats import chisquare from sklearn.decomposition import PCA from sklearn.preprocessing import MinMaxScaler import pandas as pd import pandas as pd df = pd.read_csv('../input/people.csv') from scipy.stats import chisquare chars = [i for i in df.columns.values if 'char_' in i] flags = [] for feat in df[c...
cell_type: code

path: 330673/cell_15
screenshot_names: ["text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png"]
code:
from scipy.stats import chisquare from sklearn.decomposition import PCA from sklearn.preprocessing import MinMaxScaler import pandas as pd import pandas as pd df = pd.read_csv('../input/people.csv') from scipy.stats import chisquare chars = [i for i in df.columns.values if 'char_' in i] flags = [] for feat in df[c...
cell_type: code

path: 330673/cell_17
screenshot_names: ["text_plain_output_1.png", "image_output_1.png"]
code:
from scipy.stats import chisquare from sklearn.decomposition import PCA from sklearn.preprocessing import MinMaxScaler import pandas as pd import pandas as pd df = pd.read_csv('../input/people.csv') from scipy.stats import chisquare chars = [i for i in df.columns.values if 'char_' in i] flags = [] for feat in df[c...
cell_type: code

path: 330673/cell_14
screenshot_names: ["text_plain_output_1.png"]
code:
from scipy.stats import chisquare from sklearn.decomposition import PCA from sklearn.preprocessing import MinMaxScaler import pandas as pd import pandas as pd df = pd.read_csv('../input/people.csv') from scipy.stats import chisquare chars = [i for i in df.columns.values if 'char_' in i] flags = [] for feat in df[c...
cell_type: code

path: 325017/cell_5
screenshot_names: ["application_vnd.jupyter.stderr_output_1.png"]
code:
import pandas as pd masterDF = pd.read_csv('../input/emails.csv') messageList = masterDF['message'].tolist() bodyList = [] for message in messageList: firstSplit = message.split('X-FileName: ', 1)[1] secondSplit = firstSplit.split('.') if len(secondSplit) > 1: secondSplit = secondSplit[1] body...
cell_type: code

path: 16135671/cell_9
screenshot_names: ["text_plain_output_1.png"]
code:
import matplotlib.pyplot as plt def test_plot(x): pass test_plot(x_train_re)
cell_type: code

path: 16135671/cell_6
screenshot_names: ["text_plain_output_1.png"]
code:
import matplotlib.pyplot as plt def test_plot(x): pass test_plot(x_test)
cell_type: code

path: 16135671/cell_1
screenshot_names: ["application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png"]
code:
!pip install tensorflow-gpu import numpy as np import pandas as pd import matplotlib.pyplot as plt import tensorflow as tf import keras from keras.models import Model from keras.layers import Input, Dense, Conv2D, MaxPooling2D, UpSampling2D, BatchNormalization, Activation from keras.datasets import cifar10
cell_type: code

path: 16135671/cell_3
screenshot_names: ["image_output_1.png"]
code:
from keras.datasets import cifar10 def load_images(): (x_train, _), (x_test, _) = cifar10.load_data() return (x_train, x_test) x_train, x_test = load_images()
cell_type: code

path: 16135671/cell_14
screenshot_names: ["image_output_1.png"]
code:
from keras.datasets import cifar10 from keras.layers import Input, Dense, Conv2D, MaxPooling2D, UpSampling2D, BatchNormalization, Activation from keras.models import Model import keras import matplotlib.pyplot as plt def load_images(): (x_train, _), (x_test, _) = cifar10.load_data() return (x_train, x_test...
cell_type: code

path: 16135671/cell_10
screenshot_names: ["text_plain_output_1.png"]
code:
import keras def normalize(x_train, x_test): x_train = keras.utils.normalize(x_train) x_test = keras.utils.normalize(x_test) return (x_train, x_test) print(x_train_re.shape[1:]) print(x_test_re.shape) input_shape = x_train.shape[1:] receptive_field = (3, 3) pooling_field = (2, 2)
cell_type: code

path: 16135671/cell_5
screenshot_names: ["image_output_1.png"]
code:
import matplotlib.pyplot as plt def test_plot(x): pass test_plot(x_train)
cell_type: code

path: 122264859/cell_9
screenshot_names: ["text_plain_output_1.png", "image_output_1.png"]
code:
from sklearn.model_selection import train_test_split from tensorflow import keras from tensorflow.keras.optimizers import RMSprop from tensorflow.keras.preprocessing.image import ImageDataGenerator import numpy as np import pandas as pd import tensorflow as tf import tensorflow as tf mnist = keras.datasets.mnis...
cell_type: code

path: 122264859/cell_4
screenshot_names: ["image_output_1.png"]
code:
from sklearn.model_selection import train_test_split from tensorflow import keras import numpy as np import pandas as pd mnist = keras.datasets.mnist (train_images_mnist, train_labels_mnist), (test_images_mnist, test_labels_mnist) = mnist.load_data() train_images_mnist = np.reshape(train_images_mnist, (train_images...
cell_type: code

path: 122264859/cell_19
screenshot_names: ["image_output_1.png"]
code:
from imutils.contours import sort_contours from matplotlib import cm from sklearn.model_selection import train_test_split from tensorflow import keras from tensorflow.keras.models import load_model from tensorflow.keras.optimizers import RMSprop from tensorflow.keras.preprocessing.image import ImageDataGenerator ...
cell_type: code

path: 122264859/cell_18
screenshot_names: ["text_plain_output_1.png"]
code:
from imutils.contours import sort_contours from matplotlib import cm from sklearn.model_selection import train_test_split from tensorflow import keras from tensorflow.keras.models import load_model from tensorflow.keras.optimizers import RMSprop from tensorflow.keras.preprocessing.image import ImageDataGenerator ...
cell_type: code

path: 122264859/cell_8
screenshot_names: ["text_plain_output_1.png"]
code:
from sklearn.model_selection import train_test_split from tensorflow import keras from tensorflow.keras.optimizers import RMSprop from tensorflow.keras.preprocessing.image import ImageDataGenerator import numpy as np import pandas as pd import tensorflow as tf import tensorflow as tf mnist = keras.datasets.mnis...
cell_type: code

path: 122264859/cell_15
screenshot_names: ["text_plain_output_2.png", "text_plain_output_1.png"]
code:
from imutils.contours import sort_contours from matplotlib import cm import cv2 import imutils import matplotlib.pyplot as plt image_path = '/kaggle/input/tester2/pja 4.jpg' image = cv2.imread(image_path) edged = cv2.Canny(blurred, 30, 250) cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX...
cell_type: code

path: 122264859/cell_17
screenshot_names: ["text_plain_output_1.png"]
code:
from imutils.contours import sort_contours from matplotlib import cm from sklearn.model_selection import train_test_split from tensorflow import keras from tensorflow.keras.optimizers import RMSprop import cv2 import imutils import matplotlib.pyplot as plt import numpy as np import pandas as pd import tensorf...
cell_type: code

path: 122264859/cell_14
screenshot_names: ["text_plain_output_1.png"]
code:
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) cropped = gray[120:, :] blurred = cv2.GaussianBlur(cropped, (5, 5), 0) from matplotlib import cm fig = plt.figure(figsize=(16, 4)) ax = plt.subplot(1, 4, 1) ax.imshow(image) ax.set_title('original image') ax = plt.subplot(1, 4, 2) ax.imshow(gray, cmap=cm.binary_r) ax.set_a...
cell_type: code

path: 122264859/cell_10
screenshot_names: ["text_plain_output_1.png"]
code:
pip install imutils
cell_type: code

path: 122264859/cell_12
screenshot_names: ["text_plain_output_1.png"]
code:
from tensorflow.keras.models import load_model model_path = '/kaggle/working/model_v2' print('Loading NN model...') model = load_model(model_path) print('Done')
cell_type: code

path: 122264859/cell_5
screenshot_names: ["image_output_1.png"]
code:
from sklearn.model_selection import train_test_split from tensorflow import keras from tensorflow.keras.optimizers import RMSprop import numpy as np import pandas as pd import tensorflow as tf import tensorflow as tf mnist = keras.datasets.mnist (train_images_mnist, train_labels_mnist), (test_images_mnist, test_...
cell_type: code

path: 16115621/cell_9
screenshot_names: ["image_output_1.png"]
code:
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') train.reset_index(drop=True, inplace=True) train['SalePrice'] = np.log1p(train['SalePrice']) train['SalePrice'].hist(bins=50) y ...
cell_type: code

path: 16115621/cell_6
screenshot_names: ["text_html_output_1.png"]
code:
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') test.describe()
cell_type: code

path: 16115621/cell_11
screenshot_names: ["text_html_output_1.png"]
code:
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') train.reset_index(drop=True, inplace=True) train['SalePrice'] = np.log1p(train['SalePrice']) y = train['SalePrice'].reset_index(...
cell_type: code

path: 16115621/cell_7
screenshot_names: ["application_vnd.jupyter.stderr_output_1.png", "image_output_1.png"]
code:
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') train['SalePrice'].hist(bins=50)
cell_type: code

path: 16115621/cell_15
screenshot_names: ["text_html_output_1.png"]
code:
import matplotlib.pyplot as plt # visualization import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns # visualization train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') train.reset_index(drop=True, inplace=Tru...
cell_type: code

path: 16115621/cell_14
screenshot_names: ["image_output_1.png"]
code:
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') train.reset_index(drop=True, inplace=True) train['SalePrice'] = np.log1p(train['SalePrice']) y = train['SalePrice'].reset_index(...
cell_type: code

path: 16115621/cell_10
screenshot_names: ["text_html_output_1.png"]
code:
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') train.reset_index(drop=True, inplace=True) train['SalePrice'] = np.log1p(train['SalePrice']) y = train['SalePrice'].reset_index(...
cell_type: code

path: 16115621/cell_12
screenshot_names: ["text_plain_output_1.png", "image_output_1.png"]
code:
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') train.reset_index(drop=True, inplace=True) train['SalePrice'] = np.log1p(train['SalePrice']) y = train['SalePrice'].reset_index(...
cell_type: code

path: 16115621/cell_5
screenshot_names: ["text_plain_output_1.png"]
code:
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') train.describe()
cell_type: code

path: 72097728/cell_9
screenshot_names: ["text_plain_output_1.png"]
code:
from sklearn.compose import ColumnTransformer from sklearn.ensemble import RandomForestRegressor from sklearn.impute import SimpleImputer from sklearn.metrics import mean_squared_error from sklearn.model_selection import train_test_split from sklearn.pipeline import Pipeline from sklearn.preprocessing import OneH...
cell_type: code

path: 72097728/cell_3
screenshot_names: ["text_plain_output_1.png"]
code:
from sklearn.model_selection import train_test_split import pandas as pd import pandas as pd from sklearn.model_selection import train_test_split df = pd.read_csv('../input/30-days-of-ml/train.csv') test_df = pd.read_csv('../input/30-days-of-ml/test.csv') sample_df = pd.read_csv('../input/30-days-of-ml/sample_submiss...
cell_type: code

path: 2003139/cell_9
screenshot_names: ["text_plain_output_1.png"]
code:
from PIL import Image from keras import optimizers, losses, activations, models from keras.layers import Convolution2D, Dense, Input, Flatten, Dropout, MaxPooling2D, BatchNormalization, GlobalMaxPool2D, Concatenate from random import shuffle import numpy as np # linear algebra import os import pandas as pd # data...
cell_type: code

path: 2003139/cell_1
screenshot_names: ["text_plain_output_1.png"]
code:
from subprocess import check_output import numpy as np import pandas as pd import os from PIL import Image from skimage.transform import resize from random import shuffle from subprocess import check_output print(check_output(['ls', '../input']).decode('utf8'))
cell_type: code

path: 2003139/cell_10
screenshot_names: ["application_vnd.jupyter.stderr_output_1.png"]
code:
from PIL import Image from keras import optimizers, losses, activations, models from keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping, ReduceLROnPlateau, TensorBoard from keras.layers import Convolution2D, Dense, Input, Flatten, Dropout, MaxPooling2D, BatchNormalization, GlobalMaxPool2D, ...
cell_type: code

path: 105185803/cell_1
screenshot_names: ["text_plain_output_1.png"]
code:
import pandas as pd import plotly.express as px import os !pip install kaleido
cell_type: code

path: 105185803/cell_5
screenshot_names: ["text_html_output_2.png"]
code:
import pandas as pd import plotly.express as px stream = open('../input/bbk-kunpeng/perf_test_result.txt', 'r') stream = stream.read() ds = pd.DataFrame(columns=['N', 'NRHS', 'data_type', 'gflops', 'uplo']) datatype = 'single' N = 0 NRHS = 0 uplo = 'U' gflops = 0.0 cnt = 0 for line in stream.split('\n'): if lin...
cell_type: code

path: 121149832/cell_4
screenshot_names: ["application_vnd.jupyter.stderr_output_766.png", "application_vnd.jupyter.stderr_output_116.png", "application_vnd.jupyter.stderr_output_74.png", "application_vnd.jupyter.stderr_output_268.png", "application_vnd.jupyter.stderr_output_362.png", "text_plain_output_743.png", "text_plain_output_673.png", ...]
code:
# Install nb_black for autoformatting !pip install nb_black --quiet
cell_type: code

path: 121149832/cell_19
screenshot_names: ["text_html_output_1.png"]
code:
from kaggle_secrets import UserSecretsClient from logging import getLogger, INFO, FileHandler, Formatter, StreamHandler from sklearn.metrics import accuracy_score import json import numpy as np import os import os import pandas as pd import random import torch import wandb import warnings import numpy as np...
cell_type: code

path: 121149832/cell_3
screenshot_names: ["text_html_output_4.png", "text_html_output_2.png", "text_html_output_5.png", "text_html_output_1.png", "application_vnd.jupyter.stderr_output_1.png", "text_html_output_3.png"]
code:
!pip install onnx_tf !pip install tflite-runtime !pip install -q --upgrade wandb
cell_type: code

path: 121149832/cell_17
screenshot_names: ["text_plain_output_1.png"]
code:
from sklearn.metrics import accuracy_score import json import numpy as np import os import os import pandas as pd import random import torch import torch.nn as nn import warnings import numpy as np import pandas as pd import math import random import time from collections import OrderedDict import tensorflow ...
cell_type: code

path: 121149832/cell_14
screenshot_names: ["text_plain_output_1.png"]
code:
from logging import getLogger, INFO, FileHandler, Formatter, StreamHandler from sklearn.metrics import accuracy_score import json import numpy as np import os import os import pandas as pd import random import torch import warnings import numpy as np import pandas as pd import math import random import time f...
cell_type: code

path: 48166170/cell_13
screenshot_names: ["text_html_output_1.png"]
code:
k_range = range(1, 21) print('k range', k_range)
cell_type: code

path: 48166170/cell_39
screenshot_names: ["text_plain_output_1.png"]
code:
from sklearn import metrics from sklearn.model_selection import GridSearchCV from sklearn.neighbors import KNeighborsClassifier val_results = [] train_results = [] k_range = range(1, 21) for k in k_range: clf_2 = KNeighborsClassifier(n_neighbors=k) clf_2 = clf_2.fit(X_train, y_train) pred_train = clf_2...
cell_type: code

path: 48166170/cell_26
screenshot_names: ["image_output_1.png"]
code:
from sklearn import metrics from sklearn.model_selection import cross_val_score, learning_curve, validation_curve from sklearn.neighbors import KNeighborsClassifier import matplotlib.pyplot as plt import numpy as np import pandas as pd df2 = pd.read_csv('../input/diabetescsv/diabetes.csv') X = df2.iloc[:, 0:8] y...
cell_type: code

path: 48166170/cell_7
screenshot_names: ["text_plain_output_1.png"]
code:
import pandas as pd df2 = pd.read_csv('../input/diabetescsv/diabetes.csv') X = df2.iloc[:, 0:8] y = df2.iloc[:, 8] print(X)
cell_type: code

path: 48166170/cell_17
screenshot_names: ["text_plain_output_1.png"]
code:
from sklearn import metrics from sklearn.neighbors import KNeighborsClassifier import matplotlib.pyplot as plt val_results = [] train_results = [] k_range = range(1, 21) for k in k_range: clf_2 = KNeighborsClassifier(n_neighbors=k) clf_2 = clf_2.fit(X_train, y_train) pred_train = clf_2.predict(X_train)...
cell_type: code

path: 48166170/cell_31
screenshot_names: ["text_plain_output_1.png"]
code:
from sklearn.neighbors import KNeighborsClassifier clf_3 = KNeighborsClassifier() param_grid = [{'weights': ['uniform'], 'n_neighbors': list(range(1, 21))}, {'weights': ['distance'], 'n_neighbors': list(range(1, 21))}] print(param_grid)
cell_type: code

path: 48166170/cell_22
screenshot_names: ["text_plain_output_1.png"]
code:
from sklearn import metrics from sklearn.neighbors import KNeighborsClassifier val_results = [] train_results = [] k_range = range(1, 21) for k in k_range: clf_2 = KNeighborsClassifier(n_neighbors=k) clf_2 = clf_2.fit(X_train, y_train) pred_train = clf_2.predict(X_train) train_score = metrics.accura...
cell_type: code

path: 48166170/cell_37
screenshot_names: ["image_output_1.png"]
code:
from sklearn.model_selection import GridSearchCV from sklearn.neighbors import KNeighborsClassifier clf_3 = KNeighborsClassifier() param_grid = [{'weights': ['uniform'], 'n_neighbors': list(range(1, 21))}, {'weights': ['distance'], 'n_neighbors': list(range(1, 21))}] gs = GridSearchCV(clf_3, param_grid, scoring='acc...
cell_type: code

path: 48166170/cell_5
screenshot_names: ["text_plain_output_2.png", "text_plain_output_1.png"]
code:
import pandas as pd df2 = pd.read_csv('../input/diabetescsv/diabetes.csv') df2.head(10)
cell_type: code

path: 16163803/cell_13
screenshot_names: ["image_output_1.png"]
code:
"""his = model.fit_generator(train_gen, epochs=10, steps_per_epoch=len(X_train)/BATCH_SIZE, validation_data=test_gen, validation_steps=len(X_test)/BATCH_SIZE)"""
cell_type: code

path: 16163803/cell_4
screenshot_names: ["text_plain_output_1.png"]
code:
import cv2 import matplotlib.pyplot as plt import os import os category = ['cat', 'dog'] EPOCHS = 50 IMGSIZE = 128 BATCH_SIZE = 32 STOPPING_PATIENCE = 15 VERBOSE = 1 MODEL_NAME = 'cnn_50epochs_imgsize128' OPTIMIZER = 'adam' TRAINING_DIR = '../input/dogs-vs-cats-redux-kernels-edition/train' TEST_DIR = '../input/dogs...
cell_type: code

path: 16163803/cell_6
screenshot_names: ["image_output_1.png"]
code:
import cv2 import matplotlib.pyplot as plt import numpy as np # linear algebra import os import os category = ['cat', 'dog'] EPOCHS = 50 IMGSIZE = 128 BATCH_SIZE = 32 STOPPING_PATIENCE = 15 VERBOSE = 1 MODEL_NAME = 'cnn_50epochs_imgsize128' OPTIMIZER = 'adam' TRAINING_DIR = '../input/dogs-vs-cats-redux-kernels-edi...
cell_type: code

path: 16163803/cell_2
screenshot_names: ["text_plain_output_1.png"]
code:
import os import os print(os.listdir('../input'))
cell_type: code

path: 16163803/cell_11
screenshot_names: ["text_plain_output_1.png"]
code:
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization, Dropout from keras.layers import Dense, Flatten from keras.models import Sequential from keras.utils import to_categorical import cv2 import matplotlib.pyplot as plt import numpy as np # linear algebra import os import os category = ['cat', 'do...
cell_type: code

path: 16163803/cell_1
screenshot_names: ["application_vnd.jupyter.stderr_output_1.png"]
code:
import numpy as np import pandas as pd import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split import tensorflow as tf from keras.preprocessing.image import ImageDataGenerator from keras.utils import to_categorical from keras.models import Sequential from keras.layers import Conv2D, MaxPool...
cell_type: code

path: 16163803/cell_7
screenshot_names: ["image_output_1.png"]
code:
from keras.utils import to_categorical import cv2 import matplotlib.pyplot as plt import numpy as np # linear algebra import os import os category = ['cat', 'dog'] EPOCHS = 50 IMGSIZE = 128 BATCH_SIZE = 32 STOPPING_PATIENCE = 15 VERBOSE = 1 MODEL_NAME = 'cnn_50epochs_imgsize128' OPTIMIZER = 'adam' TRAINING_DIR = ...
cell_type: code

path: 16163803/cell_15
screenshot_names: ["text_plain_output_1.png"]
code:
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization, Dropout from keras.layers import Dense, Flatten from keras.models import Sequential from keras.utils import to_categorical import cv2 import matplotlib.pyplot as plt import numpy as np # linear algebra import os import os category = ['cat', 'do...
cell_type: code

path: 16163803/cell_16
screenshot_names: ["text_plain_output_1.png"]
code:
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization, Dropout from keras.layers import Dense, Flatten from keras.models import Sequential from keras.utils import to_categorical import cv2 import matplotlib.pyplot as plt import numpy as np # linear algebra import os import os category = ['cat', 'do...
cell_type: code

path: 16163803/cell_14
screenshot_names: ["text_plain_output_1.png"]
code:
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization, Dropout from keras.layers import Dense, Flatten from keras.models import Sequential from keras.utils import to_categorical import cv2 import matplotlib.pyplot as plt import numpy as np # linear algebra import os import os category = ['cat', 'do...
cell_type: code

path: 105192890/cell_1
screenshot_names: ["application_vnd.jupyter.stderr_output_1.png"]
code:
import os import torch from torch import nn from torch.utils.data import TensorDataset, DataLoader import pandas as pd import numpy as np import math import matplotlib.pyplot as plt from sklearn.metrics import r2_score warnings.simplefilter('ignore') import os for dirname, _, filenames in os.walk('/kaggle/input'): ...
cell_type: code

path: 121150886/cell_25
screenshot_names: ["text_plain_output_1.png"]
code:
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler from transformers import AutoTokenizer, AutoModel import re import torch import torch import unicodedata if torch.cuda.is_available(): device = torch.device('cuda') else: device = torch.device('cpu') tokenizer = Auto...
cell_type: code

path: 121150886/cell_4
screenshot_names: ["text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png"]
code:
from ydata_profiling import ProfileReport import pandas as pd df_reviews = pd.read_csv('/kaggle/input/arabic-100k-reviews/ar_reviews_100k.tsv', delimiter='\t') profile = ProfileReport(df_reviews, title='Profiling Report') profile.to_notebook_iframe()
cell_type: code

path: 121150886/cell_6
screenshot_names: ["text_plain_output_1.png"]
code:
import torch if torch.cuda.is_available(): device = torch.device('cuda') print(f'Using {torch.cuda.device_count()} GPU(s)!') print(f'Device name: {torch.cuda.get_device_name(0)}') else: device = torch.device('cpu') print('No GPU available.')
cell_type: code

path: 121150886/cell_19
screenshot_names: ["text_plain_output_1.png"]
code:
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler from transformers import AdamW, get_linear_schedule_with_warmup from transformers import AutoTokenizer, AutoModel import numpy as np import random import re import time import torch import torch import torch.nn as nn impo...
cell_type: code

path: 121150886/cell_1
screenshot_names: ["text_plain_output_1.png"]
code:
!pip install Arabic-Stopwords !pip install emoji # !pip install Tashaphyne # !pip install qalsadi # !pip install np_utils !pip install ydata-profiling
cell_type: code

path: 121150886/cell_8
screenshot_names: ["text_plain_output_1.png"]
code:
from transformers import AutoTokenizer, AutoModel tokenizer = AutoTokenizer.from_pretrained('asafaya/bert-mini-arabic')
cell_type: code

path: 121150886/cell_15
screenshot_names: ["application_vnd.jupyter.stderr_output_2.png", "text_plain_output_3.png", "text_plain_output_1.png"]
code:
import torch import torch.nn as nn from transformers import BertModel class BertClassifier(nn.Module): def __init__(self, freeze_bert=False, version='mini'): super(BertClassifier, self).__init__() D_in = 256 if version == 'mini' else 758 H = 50 D_out = 2 self.bert = AutoModel...
cell_type: code

path: 121150886/cell_12
screenshot_names: ["text_html_output_1.png"]
code:
from sklearn.model_selection import train_test_split from transformers import AutoTokenizer, AutoModel import pandas as pd import re import torch import unicodedata df_reviews = pd.read_csv('/kaggle/input/arabic-100k-reviews/ar_reviews_100k.tsv', delimiter='\t') label_mapping = {'Positive': 1, 'Negative': 0} df_...
cell_type: code

path: 121150886/cell_5
screenshot_names: ["text_plain_output_1.png", "image_output_1.png"]
code:
import pandas as pd df_reviews = pd.read_csv('/kaggle/input/arabic-100k-reviews/ar_reviews_100k.tsv', delimiter='\t') label_mapping = {'Positive': 1, 'Negative': 0} df_reviews = df_reviews[df_reviews.label != 'Mixed'] print(df_reviews.shape) df_reviews.label = df_reviews.label.map(label_mapping) df_reviews.label.valu...
cell_type: code

path: 128034284/cell_4
screenshot_names: ["application_vnd.jupyter.stderr_output_1.png"]
code:
import pandas as pd def get_nan_dummy(series): """Given a Series containing NaN and several classes return a dummy Series indicating 0 for NaN and 1 for non-NaN data Parameters ---------- - series : pd.Series, input series or col to dummify Return ------ - s : pd.Series the dummy Series...
cell_type: code

path: 128034284/cell_2
screenshot_names: ["application_vnd.jupyter.stderr_output_1.png"]
code:
import seaborn as sns import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from scipy.stats import norm, zscore from sklearn.compose import make_column_transformer from sklearn.compose import make_column_selector from sklearn.ensemble import GradientBoostingRegressor, HistGradie...
cell_type: code

path: 2019512/cell_9
screenshot_names: ["image_output_5.png", "image_output_7.png", "image_output_4.png", "image_output_8.png", "image_output_6.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "image_output_9.png"]
code:
from sklearn.cluster import KMeans, SpectralClustering, AgglomerativeClustering from sklearn.feature_selection import VarianceThreshold from sklearn.manifold import MDS from sklearn.preprocessing import StandardScaler import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read...
cell_type: code

path: 2019512/cell_6
screenshot_names: ["image_output_1.png"]
code:
from sklearn.feature_selection import VarianceThreshold from sklearn.manifold import MDS from sklearn.preprocessing import StandardScaler import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import numpy as np import pandas as pd from subprocess import check_output...
cell_type: code

path: 2019512/cell_2
screenshot_names: ["image_output_1.png"]
code:
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import numpy as np import pandas as pd from subprocess import check_output frame = pd.read_csv('../input/HR_comma_sep.csv').rename(columns={'sales': 'position'}) frame.iloc[:, 5:].head()
cell_type: code

path: 2019512/cell_1
screenshot_names: ["text_html_output_1.png"]
code:
from subprocess import check_output import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import numpy as np import pandas as pd from subprocess import check_output frame = pd.read_csv('../input/HR_comma_sep.csv').rename(columns={'sales': 'position'}) frame.iloc[:, :5].head()
cell_type: code

path: 2019512/cell_7
screenshot_names: ["text_plain_output_1.png", "image_output_1.png"]
code:
from sklearn.cluster import KMeans, SpectralClustering, AgglomerativeClustering from sklearn.feature_selection import VarianceThreshold from sklearn.manifold import MDS from sklearn.preprocessing import StandardScaler import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read...
cell_type: code

path: 2019512/cell_8
screenshot_names: ["image_output_1.png"]
code:
from sklearn.cluster import KMeans, SpectralClustering, AgglomerativeClustering from sklearn.feature_selection import VarianceThreshold from sklearn.manifold import MDS from sklearn.preprocessing import StandardScaler import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read...
cell_type: code

path: 2019512/cell_3
screenshot_names: ["text_plain_output_1.png", "image_output_1.png"]
code:
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import numpy as np import pandas as pd from subprocess import check_output frame = pd.read_csv('../input/HR_comma_sep.csv').rename(columns={'sales': 'position'}) pd.DataFrame({'dtypes': frame.dtypes, 'isnull': pd.isnull(frame).any(), 'count distin...
cell_type: code

path: 2019512/cell_10
screenshot_names: ["text_html_output_1.png"]
code:
from sklearn.cluster import KMeans, SpectralClustering, AgglomerativeClustering from sklearn.feature_selection import VarianceThreshold from sklearn.manifold import MDS from sklearn.preprocessing import StandardScaler import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read...
cell_type: code