1. Find-S Algorithm


import csv
with open('/content/drive/MyDrive/ML lab programs/Lab 1 Find-S algorithm/weather.csv') as f:
  reader = csv.reader(f)
  data = list(reader) 

print('Training data')
for row in data:
  print(row)
attr_len = len(data[0]) - 1   # number of attributes (the last column is the label)
h = ['0'] * attr_len          # start from the most specific hypothesis
print('The hypotheses are')
for row in data:
  if row[-1] == 'Yes':
    j = 0
    for col in row: 
      if col != 'Yes':
        if col != h[j] and h[j] == '0':
          h[j] = col
        elif col != h[j] and h[j] != '0':
          h[j] = '?'
      j = j + 1
  print(h)
print('Maximally Specific Hypothesis:', h)
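Find-S only ever generalises: each positive example either fills in an empty attribute ('0' becomes the value) or relaxes a mismatch to '?', and negative examples are ignored entirely. The weather.csv file is not reproduced on this page; a compatible sample (hypothetical, modelled on Mitchell's EnjoySport data, label in the last column) would look like:

Sunny,Warm,Normal,Strong,Warm,Same,Yes
Sunny,Warm,High,Strong,Warm,Same,Yes
Rainy,Cold,High,Strong,Warm,Change,No
Sunny,Warm,High,Strong,Cool,Change,Yes

With that file the program ends with ['Sunny', 'Warm', '?', 'Strong', '?', '?'] as the maximally specific hypothesis.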



2. Candidate Elimination Algorithm


import csv
with open('/content/drive/MyDrive/ML lab programs/Lab 2 Candidate Selection Algorithm/enjoysport1.csv') as f:
  reader = csv.reader(f)
  data = list(reader) 
print('Training data')
for row in data:
  print(row)
print('-------------------------------------------------------------')


attr_len = len(data[0]) - 1   # number of attributes (the last column is the label)
s = ['0'] * attr_len          # most specific hypothesis
g = ['?'] * attr_len          # most general hypothesis (working template)
temp = []                     # specialised general hypotheses collected so far


print('The hypotheses are')
print('S=',s)
print('G=',g)
print('-------------------------------------------------------------')


for row in data:
  if row[-1] == 'yes':
    j=0
    for col in row:
      if col != 'yes':
        if col != s[j] and s[j] == '0':
          s[j] = col
        elif col != s[j] and s[j] != '0':
          s[j] = '?'
      j+= 1

    # prune general hypotheses that are inconsistent with the updated s
    for j in range(0, attr_len):
      for k in temp[:]:   # iterate over a copy: removing items from a list while iterating over it skips elements
        if k[j] != s[j] and k[j] != '?':
          temp.remove(k)
  elif row[-1] == 'no':
    j = 0
    for col in row:
      if col != 'no':
        if col != s[j] and s[j] != '?':
          g[j] = s[j]
          temp.append(g)
          g = ['?'] * attr_len
      j+=1
  print('S=',s)
  if len(temp) == 0:
    print('G=',g)
  else:
    print('G=',temp)
  print('-------------------------------------------------------------')
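This is the simplified candidate elimination found in most lab manuals: positive examples generalise S exactly as Find-S does, each negative example specialises G only on the attributes where it disagrees with the current S, and general hypotheses that become inconsistent with S are pruned from temp. The enjoysport1.csv file is not shown here; assuming the classic four EnjoySport rows in lower case,

sunny,warm,normal,strong,warm,same,yes
sunny,warm,high,strong,warm,same,yes
rainy,cold,high,strong,warm,change,no
sunny,warm,high,strong,cool,change,yes

the run ends with S = ['sunny', 'warm', '?', 'strong', '?', '?'] and G = [['sunny', '?', '?', '?', '?', '?'], ['?', 'warm', '?', '?', '?', '?']].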


4. Backpropagation Neural Network

import numpy as np

X = np.array(([2, 9], [1, 5], [3, 6]), dtype=float)   # two input features per example
y = np.array(([92], [86], [89]), dtype=float)
X = X/np.amax(X, axis=0)   # normalise each feature column to [0, 1]
y = y/100                  # scale targets to [0, 1]


def sigmoid(x):
    return 1/(1 + np.exp(-x))


# derivative of the sigmoid written in terms of the already-activated value:
# if a = sigmoid(x), then sigmoid'(x) = a * (1 - a)
def derivatives_sigmoid(x):
    return x * (1 - x)


epoch = 5   # training iterations (kept tiny here; see the note below)
lr = 0.1    # learning rate

inputlayer_neurons = 2    # number of input features
hiddenlayer_neurons = 3   # number of hidden units
output_neurons = 1        # number of output units


# random initialisation of weights and biases
wh = np.random.uniform(size=(inputlayer_neurons, hiddenlayer_neurons))
bh = np.random.uniform(size=(1, hiddenlayer_neurons))
wout = np.random.uniform(size=(hiddenlayer_neurons, output_neurons))
bout = np.random.uniform(size=(1, output_neurons))


for i in range(epoch):

    # forward propagation
    hinp = np.dot(X, wh) + bh
    hlayer_act = sigmoid(hinp)
    outinp = np.dot(hlayer_act, wout) + bout
    output = sigmoid(outinp)

    # backpropagation of error
    EO = y - output
    outgrad = derivatives_sigmoid(output)
    d_output = EO * outgrad
    EH = d_output.dot(wout.T)
    hiddengrad = derivatives_sigmoid(hlayer_act)
    d_hiddenlayer = EH * hiddengrad

    # weight and bias updates
    wout += hlayer_act.T.dot(d_output) * lr
    bout += np.sum(d_output, axis=0, keepdims=True) * lr
    wh += X.T.dot(d_hiddenlayer) * lr
    bh += np.sum(d_hiddenlayer, axis=0, keepdims=True) * lr
print("Input: \n" ,X) 
print("Actual Output: \n" ,y)
print("Predicted Output: \n" ,output)


6. Naive Bayesian Classifier

import pandas as pd 
msg=pd.read_csv('/content/drive/MyDrive/ML lab programs/lab 6 bayesian classifier/data6.csv',names=['message','label']) #Tabular form data 
print('Total instances in the dataset:',msg.shape[0])
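# data6.csv has no header row (names=['message','label'] supplies the column
# names). A compatible sample file (hypothetical) would contain lines like:
#   I love this sandwich,pos
#   This is an amazing place,pos
#   I am tired of this stuff,neg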

msg['labelnum']=msg.label.map({'pos':1,'neg':0}) 

X=msg.message
Y=msg.labelnum





from sklearn.model_selection import train_test_split 
xtrain,xtest,ytrain,ytest=train_test_split(X,Y) 
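# with no test_size given, train_test_split holds out 25% of the data by
# default; pass random_state=<int> to make the split reproducible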

print('\nDataset is split into Training and Testing samples')
print('Total training instances :', ytrain.shape[0]) 
print('Total testing instances :', ytest.shape[0])


from sklearn.feature_extraction.text import CountVectorizer 
count_vect = CountVectorizer()
xtrain_dtm = count_vect.fit_transform(xtrain) #Sparse matrix 
xtest_dtm = count_vect.transform(xtest)
print('\nTotal features extracted using CountVectorizer:',xtrain_dtm.shape[1])

print('\nThe words or Tokens in the text documents\n')
print(count_vect.get_feature_names_out())   # get_feature_names() in older scikit-learn



from sklearn.naive_bayes import MultinomialNB
clf = MultinomialNB().fit(xtrain_dtm,ytrain) 
predicted = clf.predict(xtest_dtm)


from sklearn import metrics 
print('\nAccuracy metrics')
print('==================')
print('Accuracy of the classifier is',metrics.accuracy_score(ytest,predicted))

print('Recall :',metrics.recall_score(ytest,predicted), '\nPrecision :',metrics.precision_score(ytest,predicted))
print('Confusion matrix')
print('==================')
print(metrics.confusion_matrix(ytest,predicted))
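Once trained, new text must go through the same vectoriser before prediction. A quick check on a made-up message (hypothetical input, reusing count_vect and clf from above):

print(clf.predict(count_vect.transform(['I love this place'])))   # prints [1] if classified as 'pos'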

7. Bayesian Network

import pandas as pd
import numpy as np

data = pd.read_csv('/content/drive/MyDrive/ML lab programs/lab 7 bayesian network/heart.csv')
data = data.replace('?',np.nan)
#display the data
print('Sample instances from the dataset are given below')
print(data.head())
#display the attribute names and datatypes
print('\n Attributes and datatypes')
print(data.dtypes)


from pgmpy.models import BayesianModel
from pgmpy.estimators import MaximumLikelihoodEstimator
from pgmpy.inference import VariableElimination

model = BayesianModel([('age','heartdisease'),('sex','heartdisease'),
                       ('exang','heartdisease'),('cp','heartdisease'),
                       ('heartdisease','restecg'),('heartdisease','chol')])
import networkx as nx
import matplotlib.pyplot as plt
nx.draw(model, with_labels=True)
plt.show()



print('\n Learning CPD using Maximum likelihood estimators')
model.fit(data,estimator=MaximumLikelihoodEstimator)
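# MaximumLikelihoodEstimator learns a CPD for every variable from the
# relative frequencies observed in the data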
print('\n Inferencing with Bayesian Network:')
infer = VariableElimination(model)


print('\n 1.Probability of HeartDisease given evidence=restecg :1')
q1=infer.query(variables=['heartdisease'],evidence={'restecg':1})
print(q1)
print('\n 2.Probability of HeartDisease given evidence= cp:2 ')
q2=infer.query(variables=['heartdisease'],evidence={'cp':2})
print(q2)
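VariableElimination accepts several evidence variables at once; for example (hypothetical evidence values for the same columns used above):

q3 = infer.query(variables=['heartdisease'], evidence={'cp': 2, 'sex': 0})
print(q3)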


8. K-Means and EM (Gaussian Mixture) Clustering

import matplotlib.pyplot as plt
from sklearn import datasets
import sklearn.metrics as sm
import pandas as pd
import numpy as np

iris = datasets.load_iris()

X = pd.DataFrame(iris.data)
X.columns = ['Sepal_Length','Sepal_Width','Petal_Length','Petal_Width']

y = pd.DataFrame(iris.target)
y.columns = ['Targets']

plt.figure(figsize=(14,7))
colormap = np.array(['red', 'lime', 'black'])


plt.subplot(131)
plt.scatter(X.Petal_Length, X.Petal_Width, c=colormap[y.Targets], s=40)
plt.title('Real Classification')
plt.xlabel('Petal Length')
plt.ylabel('Petal Width')


from sklearn.cluster import KMeans
model = KMeans(n_clusters=3)
model.fit(X)
plt.subplot(132)
plt.scatter(X.Petal_Length, X.Petal_Width, c=colormap[model.labels_], s=40)
plt.title('K Mean Classification')
plt.xlabel('Petal Length')
plt.ylabel('Petal Width')
print('The accuracy score of K-Mean: ',sm.accuracy_score(y, model.labels_))
print('The Confusion matrix of K-Mean: ',sm.confusion_matrix(y, model.labels_))


from sklearn.mixture import GaussianMixture
gmm = GaussianMixture(n_components=3)
gmm.fit(X)
y_gmm = gmm.predict(X)
plt.subplot(133)
plt.scatter(X.Petal_Length, X.Petal_Width, c=colormap[y_gmm], s=40)
plt.title('GMM Classification')
plt.xlabel('Petal Length')
plt.ylabel('Petal Width')

print('The accuracy score of EM: ',sm.accuracy_score(y, y_gmm))
print('The Confusion matrix of EM: ',sm.confusion_matrix(y, y_gmm))
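A caveat on these scores: K-Means and GaussianMixture assign arbitrary cluster ids, so accuracy_score compares labels that may simply be permuted, and a good clustering can still score near zero. A minimal remapping sketch (the helper name is ours, not scikit-learn's) gives each cluster the majority true label before scoring:

def remap_labels(y_true, y_pred):
    # assign every cluster id the most common true label inside that cluster
    mapped = np.zeros_like(y_pred)
    for c in np.unique(y_pred):
        mask = (y_pred == c)
        mapped[mask] = np.bincount(y_true[mask]).argmax()
    return mapped

print('Remapped K-Means accuracy:',
      sm.accuracy_score(y.Targets, remap_labels(y.Targets.values, model.labels_)))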






