import tensorflow as tf

import matplotlib

matplotlib.use('agg')

import numpy as np

import pandas as pd

import matplotlib.pyplot as plt

import seaborn as sns

import random

import itertools

from sklearn.manifold import TSNE

from tensorflow.keras.datasets.cifar10 import load_data


def Create_Imbalance_Cifar10_data(class_label, data_count, cifar10_x, cifar10_y):

    # randomly sample data_count images of the given class to build an
    # artificially imbalanced training set
    print("Create_Imbalance_CIFAR10_data process")

    count = 0

    

    li_x_data = []

    li_y_data = []


    while count < data_count:

        random_num = random.randint(0,len(cifar10_x)-1)

        if(cifar10_y[random_num] == class_label):

            li_x_data.append(cifar10_x[random_num])

            li_y_data.append(cifar10_y[random_num])

            count+=1


    return li_x_data, li_y_data

# one_hot_encoding(data_y)

def one_hot_encoding(data_y):

    print("one_hot_encoding process")

    # map each distinct label to a row of the identity matrix
    # (sorted so the label-to-column assignment is deterministic)
    cls = sorted(set(data_y))

    class_dict = {c: np.identity(len(cls))[i, :] for i, c in enumerate(cls)}

    one_hot = np.array(list(map(class_dict.get, data_y)))

    

    return one_hot


# next_batch(data_count, data_x, data_y)

def next_batch(data_count, data_x, data_y):

    # draw a random batch of data_count (x, y) pairs; each call reshuffles the
    # indices independently, so batches are sampled rather than iterated in a
    # fixed epoch order
    idx = np.arange(0, len(data_x))

    np.random.shuffle(idx)

    idx = idx[:data_count]

    

    data_shuffle = [data_x[i] for i in idx]

    labels_shuffle = [data_y[i] for i in idx]


    return np.asarray(data_shuffle), np.asarray(labels_shuffle)
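
# The helpers above can be sanity-checked in isolation before they are wired into
# the training loop. A minimal illustrative sketch (the toy labels and the fake
# feature array below are made up for this check and are not part of the dataset):

toy_labels = [3, 1, 3, 0]

print(one_hot_encoding(toy_labels).shape)      # (4, 3): three distinct classes

toy_x = np.arange(20).reshape(10, 2)           # 10 fake samples, 2 features each

bx, by = next_batch(4, toy_x, list(range(10)))

print(bx.shape, by.shape)                      # (4, 2) (4,)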


# cifar10 load

(origin_data_x, origin_data_y), (x_test, y_test) = load_data()


# label & count 

label = [0,1,2,3,4,5,6,7,8,9]

count = [10,20,30,500,100,10,50,100,200,2000]


# imbalance data set list

train_data_x = []

train_data_y = []


# create imbalance data

for i in range(10):

    data_x, data_y = Create_Imbalance_Cifar10_data(label[i],count[i],origin_data_x,origin_data_y)

    

    train_data_x.append(data_x)

    train_data_y.append(data_y)
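
# Quick check that the sampling above produced the intended class distribution
# (Counter is from the standard library; each label is a (1,)-shaped array, hence .item()):

from collections import Counter

flat_label_check = [int(y.item()) for ys in train_data_y for y in ys]

print(Counter(flat_label_check))   # should match the `count` list defined above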


# train_data_y preprocessing: flatten the per-class label lists into one list of
# plain ints (the labels are (1,)-shaped arrays; np.asscalar is deprecated, so
# .item() is used instead)

train_label_list = [int(a.item()) for i in train_data_y for a in i]


# x reduce_dimension: flatten the per-class image lists into one list

imbalance_train_x = [a for i in train_data_x for a in i]

# one_hot encoder

imbalance_train_y_one_hot = one_hot_encoding(train_label_list)


print("----------------------------------------------------------")

    

# Network Model.    

tf.set_random_seed(777)


learning_rate = 0.001

training_epochs = 10

batch_size = 32


keep_prob = tf.placeholder(tf.float32)


#X = tf.placeholder(tf.float32, [None, 784])

X = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])

Y = tf.placeholder(tf.float32, [None, 10])

#Layer_1

W1 = tf.Variable(tf.random_normal([3, 3, 3, 64], stddev=0.01))

L1 = tf.nn.conv2d(X, W1, strides=[1, 1, 1, 1], padding='SAME')

# note: training=True is hard-coded for batch norm throughout this model, so the
# statistics are always computed from the current batch (including at test time)
L1 = tf.layers.batch_normalization(L1, center=True, scale=True, training=True)

L1 = tf.nn.relu(L1)

#Layer_2

W2 = tf.Variable(tf.random_normal([3, 3, 64, 64], stddev=0.01))

L2 = tf.nn.conv2d(L1, W2, strides=[1, 1, 1, 1], padding='SAME')

L2 = tf.layers.batch_normalization(L2, center=True, scale=True, training=True)

L2 = tf.nn.relu(L2)

L2 = tf.nn.max_pool(L2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

L2 = tf.nn.dropout(L2, keep_prob=keep_prob)

#Layer_3

W3 = tf.Variable(tf.random_normal([3, 3, 64, 128], stddev=0.01))

L3 = tf.nn.conv2d(L2, W3, strides=[1, 1, 1, 1], padding='SAME')

L3 = tf.layers.batch_normalization(L3, center=True, scale=True, training=True)

L3 = tf.nn.relu(L3)

#Layer_4

W4 = tf.Variable(tf.random_normal([3, 3, 128, 128], stddev=0.01))

L4 = tf.nn.conv2d(L3, W4, strides=[1, 1, 1, 1], padding='SAME')

L4 = tf.layers.batch_normalization(L4, center=True, scale=True, training=True)

L4 = tf.nn.relu(L4)

L4 = tf.nn.max_pool(L4, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

L4 = tf.nn.dropout(L4, keep_prob=keep_prob)

#Layer_5

W5 = tf.Variable(tf.random_normal([3, 3, 128, 256], stddev=0.01))

L5 = tf.nn.conv2d(L4, W5, strides=[1, 1, 1, 1], padding='SAME')

L5 = tf.layers.batch_normalization(L5, center=True, scale=True, training=True)

L5 = tf.nn.relu(L5)

#Layer_6

W6 = tf.Variable(tf.random_normal([3, 3, 256, 256], stddev=0.01))

L6 = tf.nn.conv2d(L5, W6, strides=[1, 1, 1, 1], padding='SAME')

L6 = tf.layers.batch_normalization(L6, center=True, scale=True, training=True)

L6 = tf.nn.relu(L6)

#Layer_7

W7 = tf.Variable(tf.random_normal([3, 3, 256, 256], stddev=0.01))

L7 = tf.nn.conv2d(L6, W7, strides=[1, 1, 1, 1], padding='SAME')

L7 = tf.layers.batch_normalization(L7, center=True, scale=True, training=True)

L7 = tf.nn.relu(L7)

L7 = tf.nn.max_pool(L7, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

L7 = tf.nn.dropout(L7, keep_prob=keep_prob)

#Layer_8

W8 = tf.Variable(tf.random_normal([3, 3, 256, 512], stddev=0.01))

L8 = tf.nn.conv2d(L7, W8, strides=[1, 1, 1, 1], padding='SAME')

L8 = tf.layers.batch_normalization(L8, center=True, scale=True, training=True)

L8 = tf.nn.relu(L8)

#Layer_9

W9 = tf.Variable(tf.random_normal([3, 3, 512, 512], stddev=0.01))

L9 = tf.nn.conv2d(L8, W9, strides=[1, 1, 1, 1], padding='SAME')

L9 = tf.layers.batch_normalization(L9, center=True, scale=True, training=True)

L9 = tf.nn.relu(L9)

#Layer_10

W10 = tf.Variable(tf.random_normal([3, 3, 512, 512], stddev=0.01))

L10 = tf.nn.conv2d(L9, W10, strides=[1, 1, 1, 1], padding='SAME')

L10 = tf.layers.batch_normalization(L10, center=True, scale=True, training=True)

L10 = tf.nn.relu(L10)

L10 = tf.nn.max_pool(L10, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

L10 = tf.nn.dropout(L10, keep_prob=keep_prob)

# after four 2x2 max-pools the 32x32 input is reduced to 2x2 spatially
# (32 -> 16 -> 8 -> 4 -> 2), so the flattened feature size is 512 * 2 * 2
L10_flat = tf.reshape(L10, [-1, 512 * 2 * 2])

#Layer_11

W11 = tf.get_variable("W11", shape=[512 * 2 * 2, 100], initializer=tf.contrib.layers.xavier_initializer())

b11 = tf.Variable(tf.random_normal([100]))

L11 = tf.nn.relu(tf.matmul(L10_flat, W11) + b11)

L11 = tf.nn.dropout(L11, keep_prob=keep_prob)

#Layer_12

W12 = tf.get_variable("W12", shape=[100, 10], initializer=tf.contrib.layers.xavier_initializer())

b12 = tf.Variable(tf.random_normal([10]))

logits = tf.matmul(L11, W12) + b12


#logits = output

y_pred = tf.nn.softmax(logits)


cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=logits))

# note: the optimizer uses a fixed 1e-4 step size rather than the learning_rate
# variable defined above
optimizer = tf.train.RMSPropOptimizer(1e-4).minimize(cost)


correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.argmax(Y, 1))

accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))


sess = tf.Session()

sess.run(tf.global_variables_initializer())


print('Learning start')

for epoch in range(training_epochs):

    avg_cost = 0

    avg_acc = 0

    

    total_batch = int(len(imbalance_train_x) / batch_size)

    

    for i in range(total_batch):

        batch = next_batch(batch_size, imbalance_train_x, imbalance_train_y_one_hot)

        feed_dict = {X: batch[0], Y: batch[1], keep_prob: 0.7}

        c, _ , acc = sess.run([cost, optimizer, accuracy], feed_dict=feed_dict)

        

        avg_cost += c / total_batch

        avg_acc += acc / total_batch

        

    print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.9f}'.format(avg_cost), 'Accuracy =', '{:.9f}'.format(avg_acc))

print('Learning Finish')



print('----------TEST----------')

# test data: flatten y_test ((10000, 1) -> list of plain ints) and one-hot encode it

test_label_list = [int(a.item()) for a in y_test]

test_y_one_hot = one_hot_encoding(test_label_list)


# cal ACC

print('----------TEST----------')

total_batch = int(len(x_test) / batch_size)


test_acc = 0

for i in range(total_batch):

    batch = next_batch(batch_size, x_test, test_y_one_hot)

    feed_dict = {X: batch[0], Y: batch[1], keep_prob: 1.0}

    # evaluation only: do not run the optimizer on the test set
    c, acc = sess.run([cost, accuracy], feed_dict=feed_dict)

    test_acc += acc / total_batch

    

print('Test Accuracy = {0}'.format(test_acc))
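
# With an imbalanced training set, overall accuracy can hide poor performance on
# the rare classes, so a per-class breakdown is more informative. A minimal sketch
# using the graph defined above (evaluated on the first 1000 test images only, to
# keep it cheap; this must run before sess.close()):

pred_labels = sess.run(tf.argmax(y_pred, 1), feed_dict={X: x_test[:1000], keep_prob: 1.0})

true_labels = y_test[:1000].flatten()

for c in range(10):

    mask = (true_labels == c)

    if mask.sum() > 0:

        print('class', c, 'accuracy:', (pred_labels[mask] == c).mean())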

print('END ALL')

sess.close()


import tensorflow as tf

import matplotlib

matplotlib.use('agg')

import numpy as np

import pandas as pd

import matplotlib.pyplot as plt

import seaborn as sns

import random

import itertools

from sklearn.manifold import TSNE

from tensorflow.examples.tutorials.mnist import input_data


def next_batch(data_count, data_x, data_y):

    idx = np.arange(0, len(data_x))

    np.random.shuffle(idx)

    idx = idx[:data_count]

    

    data_shuffle = [data_x[i] for i in idx]

    labels_shuffle = [data_y[i] for i in idx]


    return np.asarray(data_shuffle), np.asarray(labels_shuffle)


learning_rate = 0.001

training_epochs = 10

batch_size = 32


# load MNIST data

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
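
# Quick shape check of the loaded data (with one_hot=True the labels are already
# one-hot encoded, so no extra label preprocessing is needed in this script):

print(mnist.train.images.shape, mnist.train.labels.shape)   # (55000, 784) (55000, 10)

print(mnist.test.images.shape, mnist.test.labels.shape)     # (10000, 784) (10000, 10)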

    

# Network Model.    

tf.set_random_seed(777)


keep_prob = tf.placeholder(tf.float32)


X = tf.placeholder(tf.float32, [None, 784])

X_img = tf.reshape(X, [-1, 28, 28, 1])  

Y = tf.placeholder(tf.float32, [None, 10])


W1 = tf.Variable(tf.random_normal([3, 3, 1, 32], stddev=0.01))

L1 = tf.nn.conv2d(X_img, W1, strides=[1, 1, 1, 1], padding='SAME')

L1 = tf.layers.batch_normalization(L1, center=True, scale=True, training=True)

L1 = tf.nn.relu(L1)

L1 = tf.nn.max_pool(L1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

L1 = tf.nn.dropout(L1, keep_prob=keep_prob)


W2 = tf.Variable(tf.random_normal([3, 3, 32, 64], stddev=0.01))

L2 = tf.nn.conv2d(L1, W2, strides=[1, 1, 1, 1], padding='SAME')

L2 = tf.layers.batch_normalization(L2, center=True, scale=True, training=True)

L2 = tf.nn.relu(L2)

L2 = tf.nn.max_pool(L2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

L2 = tf.nn.dropout(L2, keep_prob=keep_prob)


W3 = tf.Variable(tf.random_normal([3, 3, 64, 128], stddev=0.01))

L3 = tf.nn.conv2d(L2, W3, strides=[1, 1, 1, 1], padding='SAME')

L3 = tf.layers.batch_normalization(L3, center=True, scale=True, training=True)

L3 = tf.nn.relu(L3)

L3 = tf.nn.max_pool(L3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

L3 = tf.nn.dropout(L3, keep_prob=keep_prob)

# three 2x2 max-pools with SAME padding reduce the 28x28 input to 4x4 spatially
# (28 -> 14 -> 7 -> 4), so the flattened feature size is 128 * 4 * 4
L3_flat = tf.reshape(L3, [-1, 128 * 4 * 4])


W4 = tf.get_variable("W4", shape=[128 * 4 * 4, 100], initializer=tf.contrib.layers.xavier_initializer())

b4 = tf.Variable(tf.random_normal([100]))

L4 = tf.nn.relu(tf.matmul(L3_flat, W4) + b4)

L4 = tf.nn.dropout(L4, keep_prob=keep_prob)


W5 = tf.get_variable("W5", shape=[100, 10], initializer=tf.contrib.layers.xavier_initializer())

b5 = tf.Variable(tf.random_normal([10]))


logits = tf.matmul(L4, W5) + b5

y_pred = tf.nn.softmax(logits)


cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=logits))

optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)


correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.argmax(Y, 1))

accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))


sess = tf.Session()

sess.run(tf.global_variables_initializer())


print('Learning start')

for epoch in range(training_epochs):

    avg_cost = 0

    avg_acc = 0

    

    total_batch = int(len(mnist.train.images) / batch_size)

    

    for i in range(total_batch):

        batch = next_batch(batch_size, mnist.train.images, mnist.train.labels)

        feed_dict = {X: batch[0], Y: batch[1], keep_prob: 0.7}

        c, _ , acc = sess.run([cost, optimizer, accuracy], feed_dict=feed_dict)

        

        avg_cost += c / total_batch

        avg_acc += acc / total_batch


    print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.9f}'.format(avg_cost), 'Accuracy =', '{:.9f}'.format(avg_acc))

print('Learning Finish')



print('----------TEST----------')

total_batch = int(len(mnist.test.images) / batch_size)


test_acc = 0

for i in range(total_batch):

    batch = next_batch(batch_size, mnist.test.images, mnist.test.labels)

    feed_dict = {X: batch[0], Y: batch[1], keep_prob: 1.0}

    # evaluation only: do not run the optimizer on the test set
    c, acc = sess.run([cost, accuracy], feed_dict=feed_dict)

    test_acc += acc / total_batch

    

print('Test Accuracy = {0}'.format(test_acc))

print('END ALL')

sess.close()


Asterisk

  • Also known as variadic arguments.
  • Uses the * operator.
  • Used to pass a variable number of arguments to a function.

# Passing plain positional values - tuple type

def test(a, *args):

    print(a, args)

    print(type(args))

    print(len(args))

    print(args[0])

    

# The value 1 is assigned to a; the rest go into args.

test(1,2,3,4,5,6)

print('================')

test(1,(2,3,4,5,6))

print('================')

test(1,*(2,3,4,5,6))


# Passing keyword values - dict type

def test(a, **kwargs):

    print(a, kwargs)

    print(type(kwargs))

    print(len(kwargs))

    print(kwargs['b'])


test(1,b=2,c=3,d=4,e=5,f=6)
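
# Both forms can also be combined in a single signature (a small illustrative
# example, following the same pattern as above):

def test_both(a, *args, **kwargs):

    print(a, args, kwargs)


test_both(1, 2, 3, x=4, y=5)   # -> 1 (2, 3) {'x': 4, 'y': 5}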


# unpacking - spreads a packed sequence back out into separate variables

a,b,c = ([1,2], [3,4], [5,6])

print(a, b, c)


data = ([1,2], [3,4], [5,6])

print(*data)


def test(a,b,c,d):

    print(a,b,c,d)


data = {"b":1, "c":2, "d":3}

test(11, **data)






# ordinary (named) function

def fx(x, y):

    return x+y

print("Function :",fx(1,9))

print('=====================================')


# lambda

fx2 = lambda x : x + x

print("Lambda :",fx2(7))

print('=====================================')


# map 

A_list = [i for i in range(5)]

print("List :", A_list)

print("Map :",list(map(fx2, A_list)))

print('=====================================')


# list Comprehension

print("List Comprehension :",[v ** 2 for v in A_list])

print('=====================================')


# Reduce

B_list = [i for i in range(0,11)]

from functools import reduce

print("Reduce :",reduce(lambda x, y : x+y, B_list))

print('=====================================')


#sum 

print("Sum :",sum(B_list))


