diff --git a/test_and_documents/test.py b/test_and_documents/test.py
index db7e2be3ae330ad3523d218a1f20864dad082c2a..9d998d8fcc7d6b1051c73bb1338a68f7c50de3c4 100644
--- a/test_and_documents/test.py
+++ b/test_and_documents/test.py
@@ -1,80 +1,34 @@
-import math
-import numpy as np
-import paddle
-from paddle import matmul, transpose, trace
 from paddle_quantum.circuit import UAnsatz
-from paddle_quantum.utils import dagger, random_pauli_str_generator, pauli_str_to_matrix
-from paddle_quantum.state import vec, vec_random, density_op, density_op_random
-theta_size = 4
-num_qubits = 2
-ITR = 80
-LR=0.5
-SEED = 1
-paddle.seed(SEED)
-path = '/home/aistudio/飞桨常规赛:量子电路合成/Question_2_Unitary.txt'
-
+from paddle import kron
+from paddle_quantum.state import vec,density_op
+import paddle
 
-#普通构建方法
-def U_theta(theta,num_qubits):
-    cir = UAnsatz(num_qubits)
-    cir.ry(theta[0],0)
-    cir.ry(theta[1],1)
-    cir.cnot([0,1])
-    cir.ry(theta[2],0)
-    cir.ry(theta[3],1)
-    return cir.U
-#通过量子比特扩展
-def U_theta2(theta,num_qubits):
+#density_matrix
+def test_density_matrix():
     cir = UAnsatz(1)
-    cir.ry(theta[0],0)
-    cir.expand(num_qubits)
-    cir.ry(theta[1],1)
-    cir.cnot([0,1])
-    cir.ry(theta[2],0)
-    cir.ry(theta[3],1)
-    return cir.U
+    cir.ry(paddle.to_tensor(1,dtype='float64'),0)
+    state = cir.run_density_matrix()
+    cir.expand(3)
+    print(cir.get_state())
 
-class Optimization_exl(paddle.nn.Layer):
-    def __init__(self,shape,dtype='float64'):
-        super(Optimization_exl,self).__init__()
-        f = np.loadtxt(path)
-        self.u = paddle.to_tensor(f)
-        self.theta = self.create_parameter(shape=shape,
-                                           default_initializer = paddle.nn.initializer.Uniform(low=0,high=2*np.pi),
-                                           dtype=dtype,is_bias=False)
-
-    def forward(self):
-        #U = U_theta(self.theta,num_qubits)
-        U = U_theta2(self.theta,num_qubits)
-        U_dagger = dagger(U)
-        loss = 1-(0.25*paddle.real(trace(matmul(self.u,U_dagger)))[0])
-        return loss
-
-    def result(self):
-        return self.theta
+    cir2 = UAnsatz(3)
+    cir2.ry(paddle.to_tensor(1,dtype='float64'),0)
+    cir2.run_density_matrix()
+    print(cir2.get_state())
 
 
-loss_list = []
-parameter_list = []
-myLayer = Optimization_exl([theta_size])
-opt = paddle.optimizer.Adam(learning_rate=LR,parameters=myLayer.parameters())
+#state_vector
+def test_state_vector():
+    cir = UAnsatz(1)
+    cir.ry(paddle.to_tensor(1,dtype='float64'),0)
+    state = cir.run_state_vector()
+    cir.expand(3)
+    print(cir.get_state())
 
-for itr in range(ITR):
-    loss = myLayer()[0]
-    loss.backward()
-    opt.minimize(loss)
-    opt.clear_grad()
+    cir2 = UAnsatz(3)
+    cir2.ry(paddle.to_tensor(1,dtype='float64'),0)
+    cir2.run_state_vector()
+    print(cir2.get_state())
 
-    loss_list.append(loss.numpy()[0])
-    parameter_list.append(myLayer.parameters()[0].numpy())
-    #if itr % 5 == 0:
-        #print('iter:', itr, ' loss: %.4f' % loss.numpy())
-print(myLayer.result())
-"""
-U_theta 结果
-Tensor(shape=[4], dtype=float64, place=CPUPlace, stop_gradient=False,
-       [-0.43797367, 7.05269038, 3.48305136, 0.42135362])
-U_theta2 结果
-Tensor(shape=[4], dtype=float64, place=CPUPlace, stop_gradient=False,
-       [-0.43797367, 7.05269038, 3.48305136, 0.42135362])
-"""
+test_density_matrix()
+test_state_vector()
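
Note: the new tests only print the expanded state next to a directly-built reference state for manual inspection. Below is a minimal self-checking sketch of the same comparison, assuming (as the side-by-side prints above suggest) that expand() pads the extra qubits with |0> and that get_state() returns a paddle.Tensor convertible via .numpy(); the helper name check_expand_state_vector is hypothetical and not part of this change.

    import numpy as np
    import paddle
    from paddle_quantum.circuit import UAnsatz

    def check_expand_state_vector():
        # 1-qubit circuit: apply ry, run, then expand to 3 qubits
        cir = UAnsatz(1)
        cir.ry(paddle.to_tensor(1, dtype='float64'), 0)
        cir.run_state_vector()
        cir.expand(3)

        # reference: the same ry gate applied directly on a 3-qubit circuit
        ref = UAnsatz(3)
        ref.ry(paddle.to_tensor(1, dtype='float64'), 0)
        ref.run_state_vector()

        # expanding after the run should reproduce the directly-built 3-qubit state
        assert np.allclose(cir.get_state().numpy(), ref.get_state().numpy())

    check_expand_state_vector()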