ray | unified framework for scaling AI | Machine Learning library
kandi X-RAY | ray Summary
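For orientation, here is a minimal sketch of Ray's core task and actor API. It is not one of the snippets on this page; the function and class names are illustrative, and it assumes a local ray.init() and a reasonably recent Ray release.

import ray

ray.init()  # start (or connect to) a Ray runtime


@ray.remote
def square(x):
    # An ordinary Python function becomes a distributed task.
    return x * x


@ray.remote
class Counter:
    # An ordinary Python class becomes a distributed actor.
    def __init__(self):
        self.n = 0

    def increment(self):
        self.n += 1
        return self.n


# .remote() calls return futures; ray.get() resolves them.
futures = [square.remote(i) for i in range(4)]
print(ray.get(futures))  # [0, 1, 4, 9]

counter = Counter.remote()
print(ray.get(counter.increment.remote()))  # 1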
- Build an eager TF policy.
- Initialize the driver.
- Build a policy class.
- Process observations.
- Build a TF policy.
- Start a Raylet.
- Run a test suite.
- Create a runtime environment.
- Create a record for cluster stats.
- Calculate the CQL loss.
ray Key Features
ray Examples and Code Snippets
import argparse
import os

import numpy as np
from tqdm import tqdm

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms

import horovod.torch as hvd
from horovod.torch.elastic.sampler import ElasticSampler
from horovod.ray import ray_logger
from horovod.ray.elastic import TestDiscovery

# Training settings
parser = argparse.ArgumentParser(
    description='PyTorch MNIST Example',
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
    '--log-dir', default='./logs', help='tensorboard log directory')
parser.add_argument(
    '--checkpoint-format',
    default='./checkpoint-{epoch}.pth.tar',
    help='checkpoint file format')
parser.add_argument(
    '--data-dir',
    default='./new_data',
    help='MNIST dataset directory (will be downloaded if needed)')
parser.add_argument(
    '--epochs', type=int, default=90, help='number of epochs to train')
parser.add_argument(
    '--lr', type=float, default=0.01, help='learning rate for a single GPU')
parser.add_argument(
    '--no-cuda',
    action='store_true',
    default=False,
    help='disables CUDA training')
parser.add_argument('--seed', type=int, default=42, help='random seed')
parser.add_argument(
    '--forceful',
    action="store_true",
    help="Removes the node upon deallocation (non-gracefully).")
parser.add_argument(
    '--change-frequency-s',
    type=int,
    default=20,
    help='how often (in seconds) the discovery changes the host set')

# Elastic Horovod settings
parser.add_argument(
    '--batches-per-commit',
    type=int,
    default=50,
    help='number of batches processed before calling `state.commit()`; '
    'commits prevent losing progress if an error occurs, but slow '
    'down training.')
parser.add_argument(
    '--batches-per-host-check',
    type=int,
    default=10,
    help=('number of batches processed before calling '
          '`state.check_host_updates()`; '
          'this check is very fast compared to state.commit() (which calls this '
          'as part of the commit process) but because it '
          'still incurs some cost due to broadcast, '
          'we may not want to perform it every batch.'))

args = parser.parse_args()


def load_data_mnist():
    # Horovod: limit # of CPU threads to be used per worker.
    torch.set_num_threads(4)

    kwargs = {'num_workers': 0, 'pin_memory': True} if args.cuda else {}
    data_dir = args.data_dir or './data'
    from filelock import FileLock
    with FileLock(os.path.expanduser("~/.horovod_lock")):
        train_dataset = \
            datasets.MNIST(data_dir, train=True, download=True,
                           transform=transforms.Compose([
                               transforms.ToTensor(),
                               transforms.Normalize((0.1307,), (0.3081,))
                           ]))
    train_sampler = ElasticSampler(train_dataset)
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=8, sampler=train_sampler, **kwargs)

    return train_loader, train_sampler


class tqdm_callback:
    def __init__(self):
        self._progress_bar = None
        self._current_epoch = None
        self._world_size = None
        self._mode = None

    def __call__(self, info):
        tqdm_mode = info["tqdm_mode"]
        assert tqdm_mode in {"val", "train"}
        reset = False
        if self._mode != tqdm_mode or \
                self._current_epoch != info["epoch"] or \
                self._world_size != info["world_size"]:
            reset = True
            self._mode = tqdm_mode
            self._current_epoch = info["epoch"]
            self._world_size = info["world_size"]

        if reset:
            if self._progress_bar is not None:
                self._progress_bar.close()
            epoch = self._current_epoch + 1
            self._progress_bar = tqdm(
                total=info["total"],
                desc=f'[mode={tqdm_mode}] Epoch #{epoch}')

        scoped = {k: v for k, v in info.items() if k.startswith(tqdm_mode)}
        self._progress_bar.set_postfix(scoped)
        self._progress_bar.update(1)


class TensorboardCallback:
    def __init__(self, logdir):
        from torch.utils.tensorboard import SummaryWriter
        self.log_writer = SummaryWriter(logdir)

    def __call__(self, info):
        tqdm_mode = info["tqdm_mode"]
        epoch = info["epoch"]
        for k, v in info.items():
            if k.startswith(tqdm_mode):
                self.log_writer.add_scalar(k, v, epoch)


def train(state, train_loader):
    epoch = state.epoch
    batch_offset = state.batch

    state.model.train()
    state.train_sampler.set_epoch(epoch)
    train_loss = Metric('train_loss')
    train_accuracy = Metric('train_accuracy')

    for batch_idx, (data, target) in enumerate(train_loader):
        # Elastic Horovod: update the current batch index this epoch
        # and commit / check for host updates. Do not check hosts when
        # we commit as it would be redundant.
        state.batch = batch_offset + batch_idx
        if args.batches_per_commit > 0 and \
                state.batch % args.batches_per_commit == 0:
            state.commit()
        elif args.batches_per_host_check > 0 and \
                state.batch % args.batches_per_host_check == 0:
            state.check_host_updates()

        if args.cuda:
            data, target = data.cuda(), target.cuda()
        state.optimizer.zero_grad()
        output = state.model(data)

        train_accuracy.update(accuracy(output, target))
        loss = F.cross_entropy(output, target)
        train_loss.update(loss)
        loss.backward()
        state.optimizer.step()

        # Only log from the 0th rank worker.
        if hvd.rank() == 0:
            ray_logger.log({
                "tqdm_mode": 'train',
                "train/loss": train_loss.avg.item(),
                "train/accuracy": 100. * train_accuracy.avg.item(),
                "total": len(train_loader),
                "epoch": epoch,
                "world_size": hvd.size()
            })


def accuracy(output, target):
    # get the index of the max log-probability
    pred = output.max(1, keepdim=True)[1]
    return pred.eq(target.view_as(pred)).cpu().float().mean()


def save_checkpoint(state):
    if hvd.rank() == 0:
        filepath = args.checkpoint_format.format(epoch=state.epoch + 1)
        state = {
            'model': state.model.state_dict(),
            'optimizer': state.optimizer.state_dict(),
            'scheduler': state.scheduler.state_dict(),
        }
        torch.save(state, filepath)


def end_epoch(state):
    state.epoch += 1
    state.batch = 0
    state.train_sampler.set_epoch(state.epoch)
    state.commit()


# Horovod: average metrics from distributed training.
class Metric(object):
    def __init__(self, name):
        self.name = name
        self.sum = torch.tensor(0.)
        self.n = torch.tensor(0.)

    def update(self, val):
        self.sum += hvd.allreduce(val.detach().cpu(), name=self.name)
        self.n += 1

    @property
    def avg(self):
        return self.sum / self.n


class Net(nn.Module):
    def __init__(self, large=False):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 300)
        self.hiddens = []
        if large:
            self.hiddens = nn.ModuleList(
                [nn.Linear(300, 300) for i in range(30)])
        self.fc2 = nn.Linear(300, 10)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        if self.hiddens:
            for layer in self.hiddens:
                x = F.relu(layer(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x)


def run(large=False):
    hvd.init()
    torch.manual_seed(args.seed)
    args.cuda = not args.no_cuda and torch.cuda.is_available()

    if args.cuda:
        # Horovod: pin GPU to local rank.
        torch.cuda.set_device(hvd.local_rank())
        torch.cuda.manual_seed(args.seed)

    # If set > 0, will resume training from a given checkpoint.
    resume_from_epoch = 0
    for try_epoch in range(args.epochs, 0, -1):
        if os.path.exists(args.checkpoint_format.format(epoch=try_epoch)):
            resume_from_epoch = try_epoch
            break

    # Load MNIST dataset
    train_loader, train_sampler = load_data_mnist()

    model = Net(large=large)

    if args.cuda:
        model.cuda()

    # Horovod: scale learning rate by the number of GPUs.
    optimizer = optim.SGD(
        model.parameters(),
        lr=args.lr * np.sqrt(hvd.size()),
        momentum=0.9,
        weight_decay=5e-4)

    # Horovod: wrap optimizer with DistributedOptimizer.
    optimizer = hvd.DistributedOptimizer(
        optimizer, named_parameters=model.named_parameters())

    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, T_max=200)

    # Restore from a previous checkpoint, if initial_epoch is specified.
    # Horovod: restore on the first worker which will broadcast
    # weights to other workers.
    if resume_from_epoch > 0 and hvd.rank() == 0:
        filepath = args.checkpoint_format.format(epoch=resume_from_epoch)
        checkpoint = torch.load(filepath)
        model.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        scheduler.load_state_dict(checkpoint['scheduler'])

    def on_state_reset():
        # Horovod: scale the learning rate as controlled by the LR schedule
        scheduler.base_lrs = [args.lr * hvd.size() for _ in scheduler.base_lrs]

    state = hvd.elastic.TorchState(
        model=model,
        optimizer=optimizer,
        scheduler=scheduler,
        train_sampler=train_sampler,
        epoch=resume_from_epoch,
        batch=0)
    state.register_reset_callbacks([on_state_reset])

    @hvd.elastic.run
    def full_train(state, train_loader):
        while state.epoch < args.epochs:
            train(state, train_loader)
            state.scheduler.step()
            save_checkpoint(state)
            end_epoch(state)

    full_train(state, train_loader)


if __name__ == '__main__':
    from horovod.ray import ElasticRayExecutor
    import ray

    ray.init(address="auto")
    settings = ElasticRayExecutor.create_settings(verbose=2)
    settings.discovery = TestDiscovery(
        min_hosts=2,
        max_hosts=5,
        change_frequency_s=args.change_frequency_s,
        use_gpu=True,
        cpus_per_slot=1,
        _graceful=not args.forceful,
        verbose=False)
    executor = ElasticRayExecutor(
        settings, use_gpu=True, cpus_per_slot=1, override_discovery=False)
    executor.start()
    executor.run(
        lambda: run(large=True),
        callbacks=[tqdm_callback(), TensorboardCallback(args.log_dir)])
import argparse
import os

from filelock import FileLock
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import torch.utils.data.distributed

import horovod.torch as hvd

# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument(
    '--batch-size',
    type=int,
    default=64,
    metavar='N',
    help='input batch size for training (default: 64)')
parser.add_argument(
    '--test-batch-size',
    type=int,
    default=1000,
    metavar='N',
    help='input batch size for testing (default: 1000)')
parser.add_argument(
    '--epochs',
    type=int,
    default=5,
    metavar='N',
    help='number of epochs to train (default: 5)')
parser.add_argument(
    '--lr',
    type=float,
    default=0.01,
    metavar='LR',
    help='learning rate (default: 0.01)')
parser.add_argument(
    '--momentum',
    type=float,
    default=0.5,
    metavar='M',
    help='SGD momentum (default: 0.5)')
parser.add_argument(
    '--no-cuda',
    action='store_true',
    default=False,
    help='disables CUDA training')
parser.add_argument(
    '--seed',
    type=int,
    default=42,
    metavar='S',
    help='random seed (default: 42)')
parser.add_argument(
    '--log-interval',
    type=int,
    default=10,
    metavar='N',
    help='how many batches to wait before logging training status')
parser.add_argument(
    '--fp16-allreduce',
    action='store_true',
    default=False,
    help='use fp16 compression during allreduce')
parser.add_argument(
    '--use-adasum',
    action='store_true',
    default=False,
    help='use adasum algorithm to do reduction')
parser.add_argument(
    '--num-batches-per-commit',
    type=int,
    default=1,
    help='number of batches per commit of the elastic state object')
parser.add_argument(
    '--data-dir',
    help='location of the training dataset in the local filesystem '
    '(will be downloaded if needed)')

args = parser.parse_args()


def metric_average(val, name):
    tensor = torch.tensor(val)
    avg_tensor = hvd.allreduce(tensor, name=name)
    return avg_tensor.item()


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x)


def train_fn():
    # Horovod: initialize library.
    hvd.init()
    torch.manual_seed(args.seed)
    args.cuda = not args.no_cuda and torch.cuda.is_available()

    if args.cuda:
        # Horovod: pin GPU to local rank.
        torch.cuda.set_device(hvd.local_rank())
        torch.cuda.manual_seed(args.seed)

    # Horovod: limit # of CPU threads to be used per worker.
    torch.set_num_threads(1)

    kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
    data_dir = args.data_dir or './data'
    with FileLock(os.path.expanduser("~/.horovod_lock")):
        train_dataset = \
            datasets.MNIST(data_dir, train=True, download=True,
                           transform=transforms.Compose([
                               transforms.ToTensor(),
                               transforms.Normalize((0.1307,), (0.3081,))
                           ]))

    # Horovod: use DistributedSampler to partition the training data.
    train_sampler = torch.utils.data.distributed.DistributedSampler(
        train_dataset, num_replicas=hvd.size(), rank=hvd.rank())
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=args.batch_size,
        sampler=train_sampler,
        **kwargs)

    transformations = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize((0.1307, ), (0.3081, ))])
    test_dataset = datasets.MNIST(
        data_dir, train=False, transform=transformations)

    # Horovod: use DistributedSampler to partition the test data.
    test_sampler = torch.utils.data.distributed.DistributedSampler(
        test_dataset, num_replicas=hvd.size(), rank=hvd.rank())
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=args.test_batch_size,
        sampler=test_sampler,
        **kwargs)

    model = Net()

    # By default, Adasum doesn't need scaling up learning rate.
    lr_scaler = hvd.size() if not args.use_adasum else 1

    if args.cuda:
        # Move model to GPU.
        model.cuda()
        # If using GPU Adasum allreduce, scale learning rate by local_size.
        if args.use_adasum and hvd.nccl_built():
            lr_scaler = hvd.local_size()

    # Horovod: scale learning rate by lr_scaler.
    optimizer = optim.SGD(
        model.parameters(), lr=args.lr * lr_scaler, momentum=args.momentum)

    # Horovod: (optional) compression algorithm.
    compression = (hvd.Compression.fp16
                   if args.fp16_allreduce else hvd.Compression.none)

    @hvd.elastic.run
    def train(state):
        # post synchronization event (worker added, worker removed) init ...
        for state.epoch in range(state.epoch, args.epochs + 1):
            state.model.train()
            train_sampler.set_epoch(state.epoch)
            steps_remaining = len(train_loader) - state.batch

            for state.batch, (data, target) in enumerate(train_loader):
                if state.batch >= steps_remaining:
                    break

                if args.cuda:
                    data, target = data.cuda(), target.cuda()
                state.optimizer.zero_grad()
                output = state.model(data)
                loss = F.nll_loss(output, target)
                loss.backward()
                state.optimizer.step()
                if state.batch % args.log_interval == 0:
                    # Horovod: use train_sampler to determine
                    # the number of examples in this worker's partition.
                    print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.
                          format(state.epoch, state.batch * len(data),
                                 len(train_sampler),
                                 100.0 * state.batch / len(train_loader),
                                 loss.item()))

                if (state.batch + 1) % args.num_batches_per_commit == 0:
                    state.commit()
            state.batch = 0

    def test():
        model.eval()
        test_loss = 0.
        test_accuracy = 0.
        for data, target in test_loader:
            if args.cuda:
                data, target = data.cuda(), target.cuda()
            output = model(data)
            # sum up batch loss
            test_loss += F.nll_loss(output, target, size_average=False).item()
            # get the index of the max log-probability
            pred = output.data.max(1, keepdim=True)[1]
            test_accuracy += pred.eq(
                target.data.view_as(pred)).cpu().float().sum()

        # Horovod: use test_sampler to determine the number of examples in
        # this worker's partition.
        test_loss /= len(test_sampler)
        test_accuracy /= len(test_sampler)

        # Horovod: average metric values across workers.
        test_loss = metric_average(test_loss, 'avg_loss')
        test_accuracy = metric_average(test_accuracy, 'avg_accuracy')

        # Horovod: print output only on first rank.
        if hvd.rank() == 0:
            print(
                '\nTest set: Average loss: {:.4f}, Accuracy: {:.2f}%\n'.format(
                    test_loss, 100. * test_accuracy))

    # Horovod: wrap optimizer with DistributedOptimizer.
    optimizer = hvd.DistributedOptimizer(
        optimizer,
        named_parameters=model.named_parameters(),
        compression=compression,
        op=hvd.Adasum if args.use_adasum else hvd.Average)

    # adjust learning rate on reset
    def on_state_reset():
        for param_group in optimizer.param_groups:
            param_group['lr'] = args.lr * hvd.size()

    state = hvd.elastic.TorchState(model, optimizer, epoch=1, batch=0)
    state.register_reset_callbacks([on_state_reset])
    train(state)
    test()


if __name__ == '__main__':
    from horovod.ray import ElasticRayExecutor
    import ray

    ray.init(address="auto")
    settings = ElasticRayExecutor.create_settings(verbose=True)
    executor = ElasticRayExecutor(settings, use_gpu=True, cpus_per_slot=2)
    executor.start()
    executor.run(train_fn)
import argparse

import tensorflow as tf
import horovod.tensorflow.keras as hvd

import ray
from horovod.ray import RayExecutor

parser = argparse.ArgumentParser(
    description='Tensorflow Ray example',
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--address', type=str, help="Ray cluster address")


def train(num_epochs):
    # Horovod: initialize Horovod.
    hvd.init()

    # Horovod: pin GPU to be used to process local rank (one GPU per process)
    gpus = tf.config.experimental.list_physical_devices('GPU')
    for gpu in gpus:
        tf.config.experimental.set_memory_growth(gpu, True)
    if gpus:
        tf.config.experimental.set_visible_devices(
            gpus[hvd.local_rank()], 'GPU')

    (mnist_images, mnist_labels), _ = \
        tf.keras.datasets.mnist.load_data(path='mnist-%d.npz' % hvd.rank())

    dataset = tf.data.Dataset.from_tensor_slices(
        (tf.cast(mnist_images[..., tf.newaxis] / 255.0, tf.float32),
         tf.cast(mnist_labels, tf.int64)))
    dataset = dataset.repeat().shuffle(10000).batch(128)

    mnist_model = tf.keras.Sequential([
        tf.keras.layers.Conv2D(32, [3, 3], activation='relu'),
        tf.keras.layers.Conv2D(64, [3, 3], activation='relu'),
        tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),
        tf.keras.layers.Dropout(0.25),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(128, activation='relu'),
        tf.keras.layers.Dropout(0.5),
        tf.keras.layers.Dense(10, activation='softmax')
    ])

    # Horovod: adjust learning rate based on number of GPUs.
    scaled_lr = 0.001 * hvd.size()
    opt = tf.optimizers.Adam(scaled_lr)

    # Horovod: add Horovod DistributedOptimizer.
    opt = hvd.DistributedOptimizer(opt)

    # Horovod: Specify `experimental_run_tf_function=False` to ensure TensorFlow
    # uses hvd.DistributedOptimizer() to compute gradients.
    mnist_model.compile(
        loss=tf.losses.SparseCategoricalCrossentropy(),
        optimizer=opt,
        metrics=['accuracy'],
        experimental_run_tf_function=False)

    callbacks = [
        # Horovod: broadcast initial variable states from rank 0 to all other processes.
        # This is necessary to ensure consistent initialization of all workers when
        # training is started with random weights or restored from a checkpoint.
        hvd.callbacks.BroadcastGlobalVariablesCallback(0),

        # Horovod: average metrics among workers at the end of every epoch.
        #
        # Note: This callback must be in the list before the ReduceLROnPlateau,
        # TensorBoard or other metrics-based callbacks.
        hvd.callbacks.MetricAverageCallback(),

        # Horovod: using `lr = 1.0 * hvd.size()` from the very beginning leads to worse final
        # accuracy. Scale the learning rate `lr = 1.0` ---> `lr = 1.0 * hvd.size()` during
        # the first three epochs. See https://arxiv.org/abs/1706.02677 for details.
        hvd.callbacks.LearningRateWarmupCallback(
            initial_lr=scaled_lr, warmup_epochs=3, verbose=1),
    ]

    # Horovod: save checkpoints only on worker 0 to prevent other workers from corrupting them.
    if hvd.rank() == 0:
        callbacks.append(
            tf.keras.callbacks.ModelCheckpoint('./checkpoint-{epoch}.h5'))

    # Horovod: write logs on worker 0.
    verbose = 1 if hvd.rank() == 0 else 0

    # Train the model.
    # Horovod: adjust number of steps based on number of GPUs.
    mnist_model.fit(
        dataset,
        steps_per_epoch=500 // hvd.size(),
        callbacks=callbacks,
        epochs=num_epochs,
        verbose=verbose)


if __name__ == '__main__':
    args = parser.parse_args()
    ray.init(address=args.address)
    settings = RayExecutor.create_settings(timeout_s=30)
    executor = RayExecutor(
        settings, num_hosts=2, num_slots=4, use_gpu=False, cpus_per_slot=8)
    executor.start()
    executor.run(train, kwargs=dict(num_epochs=1))
    executor.shutdown()
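The three snippets above share the same launch pattern: connect to a Ray cluster, build executor settings, start the executor, and run the training function on every worker. Below is a distilled sketch of that pattern; the calls are taken from the snippets themselves, and train_fn stands in for any of the training functions defined above.

import ray
from horovod.ray import RayExecutor


def train_fn():
    # Horovod training loop, as in the full examples above.
    ...


if __name__ == '__main__':
    ray.init(address="auto")  # connect to an existing Ray cluster
    settings = RayExecutor.create_settings(timeout_s=30)
    executor = RayExecutor(
        settings, num_hosts=2, num_slots=4, use_gpu=False, cpus_per_slot=8)
    executor.start()        # launch Horovod workers as Ray actors
    executor.run(train_fn)  # run the training function on every worker
    executor.shutdown()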
Trending Discussions on ray
QUESTION
I am changing the font of a figure caption in my R Markdown and am using bookdown
and pandoc to do so. My question is closely related to "How to change the figure caption format in bookdown?". I was able to get correct figure numbering and was able to alter the format of the "Figure 1" portion of the caption. However, I cannot figure out how to remove the colon in the output (i.e., "Figure 1:. ").
Minimal Example
Pandoc Function (taken from here)
function Image (img)
img.caption[1] = pandoc.Strong(img.caption[1])
img.caption[3] = pandoc.Strong(img.caption[3])
img.caption[4] = pandoc.Strong(". ")
return img
end
To use function Image
in the R Markdown, save the file as "figure_caption_patch.lua", which will be called in pandoc_args
in the YAML metadata.
R Markdown
---
title: Hello World
author: "Somebody"
output:
bookdown::word_document2:
fig_caption: yes
number_sections: FALSE
pandoc_args: ["--lua-filter", "figure_caption_patch.lua"]
---
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
```
# Test
Some text (Figure \@ref(fig:Xray)). Some text followed by a figure:
```{r Xray, fig.cap="Single-crystal X-ray structure of some text", echo=FALSE}
plot(cars)
```
Output
Figure 1:. This is a caption.
Desired Output
Figure 1. This is a caption.
In the pandoc function, I tried to subset the string of img.caption[3]
, but it did not work. I tried the following:
img.caption[3] = pandoc.Strong(string.sub(img.caption[3], 1, 1))
I know that if I was using R, then I could do something like:
a = c("words", "again")
substring(a, 1, 1)[1]
#output
[1] "w"
But I am unsure how to do this with pandoc.
ANSWER
Answered 2021-Jul-24 at 09:34
Looks like there was a change in rmarkdown
which adds a colon by default. That is also the reason why the answer in the linked post does not work anymore. For more on this and a solution see https://community.rstudio.com/t/how-to-change-the-figure-table-caption-style-in-bookdown/110397.
Besides the solution offered there, you could achieve your desired result by replacing the colon with a dot. Adapting the lua filter provided by https://stackoverflow.com/a/59301855/12993861, this can be done like so:
function Image (img)
img.caption[1] = pandoc.Strong(img.caption[1])
img.caption[3] = pandoc.Strong(pandoc.Str(string.gsub(img.caption[3].text, ":", ".")))
return img
end
QUESTION
I am trying to install conda on EMR; below is my bootstrap script. It looks like conda is getting installed, but it is not being added to the PATH environment variable. When I manually update the $PATH
variable on the EMR master node, it can identify conda
. I want to use conda on Zeppelin.
I also tried adding the config below while launching my EMR instance, however I still get the error mentioned below.
"classification": "spark-env",
"properties": {
"conda": "/home/hadoop/conda/bin"
}
[hadoop@ip-172-30-5-150 ~]$ PATH=/home/hadoop/conda/bin:$PATH
[hadoop@ip-172-30-5-150 ~]$ conda
usage: conda [-h] [-V] command ...
conda is a tool for managing and deploying applications, environments and packages.
#!/usr/bin/env bash
# Install conda
wget https://repo.continuum.io/miniconda/Miniconda3-4.2.12-Linux-x86_64.sh -O /home/hadoop/miniconda.sh \
&& /bin/bash ~/miniconda.sh -b -p $HOME/conda
conda config --set always_yes yes --set changeps1 no
conda install conda=4.2.13
conda config -f --add channels conda-forge
rm ~/miniconda.sh
echo bootstrap_conda.sh completed. PATH now: $PATH
export PYSPARK_PYTHON="/home/hadoop/conda/bin/python3.5"
echo -e '\nexport PATH=$HOME/conda/bin:$PATH' >> $HOME/.bashrc && source $HOME/.bashrc
conda create -n zoo python=3.7 # "zoo" is conda environment name, you can use any name you like.
conda activate zoo
sudo pip3 install tensorflow
sudo pip3 install boto3
sudo pip3 install botocore
sudo pip3 install numpy
sudo pip3 install pandas
sudo pip3 install scipy
sudo pip3 install s3fs
sudo pip3 install matplotlib
sudo pip3 install -U tqdm
sudo pip3 install -U scikit-learn
sudo pip3 install -U scikit-multilearn
sudo pip3 install xlutils
sudo pip3 install natsort
sudo pip3 install pydot
sudo pip3 install python-pydot
sudo pip3 install python-pydot-ng
sudo pip3 install pydotplus
sudo pip3 install h5py
sudo pip3 install graphviz
sudo pip3 install recmetrics
sudo pip3 install openpyxl
sudo pip3 install xlrd
sudo pip3 install xlwt
sudo pip3 install tensorflow.io
sudo pip3 install Cython
sudo pip3 install ray
sudo pip3 install zoo
sudo pip3 install analytics-zoo
sudo pip3 install analytics-zoo[ray]
#sudo /usr/bin/pip-3.6 install -U imbalanced-learn
ANSWER
Answered 2022-Feb-05 at 00:17
I got conda working by modifying the script as below; the EMR Python versions were colliding with the conda version:
wget https://repo.anaconda.com/miniconda/Miniconda3-py37_4.9.2-Linux-x86_64.sh -O /home/hadoop/miniconda.sh \
&& /bin/bash ~/miniconda.sh -b -p $HOME/conda
echo -e '\n export PATH=$HOME/conda/bin:$PATH' >> $HOME/.bashrc && source $HOME/.bashrc
conda config --set always_yes yes --set changeps1 no
conda config -f --add channels conda-forge
conda create -n zoo python=3.7 # "zoo" is conda environment name
conda init bash
source activate zoo
conda install python 3.7.0 -c conda-forge orca
sudo /home/hadoop/conda/envs/zoo/bin/python3.7 -m pip install virtualenv
and setting zeppelin python and pyspark parameters to:
"spark.pyspark.python": "/home/hadoop/conda/envs/zoo/bin/python3",
"spark.pyspark.virtualenv.enabled": "true",
"spark.pyspark.virtualenv.type": "native",
"spark.pyspark.virtualenv.bin.path": "/home/hadoop/conda/envs/zoo/bin/",
"zeppelin.pyspark.python": "/home/hadoop/conda/bin/python",
"zeppelin.python": "/home/hadoop/conda/bin/python"
Orca only supports TF up to 1.5, hence it was not working, as I am using TF2.
QUESTION
I am at a complete loss and really freaking out, because this project of mine was close to being done. I will give out a bounty for the answer that helps me (when I can). I am desperate, please help.
I have an Elastic Beanstalk project that has been working fine for literally months. Today, I decided to enable and disable a port listener, as seen in the photo below:
I enabled port 80
and then the website stopped working. So I was like "oh crap, I will change it back". But guess what? It is still broken. The code has not changed whatsoever, but the application is now broken and I am freaking out.
I have restarted the app servers, rebuilt the environment and nothing. I can't even access the environment site by clicking Go to environment
. I just see a Bad Gateway
message on screen. The health status of the environment when first deployed is OK
and then quickly goes to Severe
.
If my code has not changed, what is the deal here? How can I find out what is going on here? All I changed was that port, by enabling and then disabling again.
I have already come across this question: Question and I am already doing this. This environment variable is on my application.properties
file like this: server.port=5000
and its been like this for months and HAS ALREADY been working. So this can't be the reason that it broke today. I even tried adding it directly to the environment variables in Elastic Beanstalk console and same result, still getting 502 Bad Gateway.
I also have a path for the health-check configured and this has not changed in months.
Here are the last 100 lines from my log file after health status goes to Severe
:
----------------------------------------
/var/log/eb-engine.log
----------------------------------------
2022/01/27 15:53:53.370165 [INFO] Running command /bin/sh -c docker tag af10382f81a4 aws_beanstalk/current-app
2022/01/27 15:53:53.489035 [INFO] Running command /bin/sh -c docker rmi aws_beanstalk/staging-app
2022/01/27 15:53:53.568222 [INFO] Untagged: aws_beanstalk/staging-app:latest
2022/01/27 15:53:53.568307 [INFO] Running command /bin/sh -c systemctl show -p PartOf eb-docker.service
2022/01/27 15:53:53.576541 [INFO] Running command /bin/sh -c systemctl daemon-reload
2022/01/27 15:53:53.712836 [INFO] Running command /bin/sh -c systemctl reset-failed
2022/01/27 15:53:53.720035 [INFO] Running command /bin/sh -c systemctl enable eb-docker.service
2022/01/27 15:53:53.866046 [INFO] Running command /bin/sh -c systemctl show -p PartOf eb-docker.service
2022/01/27 15:53:53.875112 [INFO] Running command /bin/sh -c systemctl is-active eb-docker.service
2022/01/27 15:53:53.886916 [INFO] Running command /bin/sh -c systemctl start eb-docker.service
2022/01/27 15:53:53.991608 [INFO] Running command /bin/sh -c systemctl show -p PartOf eb-docker-log.service
2022/01/27 15:53:54.002839 [INFO] Running command /bin/sh -c systemctl daemon-reload
2022/01/27 15:53:54.092602 [INFO] Running command /bin/sh -c systemctl reset-failed
2022/01/27 15:53:54.102854 [INFO] Running command /bin/sh -c systemctl enable eb-docker-log.service
2022/01/27 15:53:54.226561 [INFO] Running command /bin/sh -c systemctl show -p PartOf eb-docker-log.service
2022/01/27 15:53:54.246914 [INFO] Running command /bin/sh -c systemctl is-active eb-docker-log.service
2022/01/27 15:53:54.263293 [INFO] Running command /bin/sh -c systemctl start eb-docker-log.service
2022/01/27 15:53:54.433800 [INFO] docker container 3771e61e64ae is running aws_beanstalk/current-app
2022/01/27 15:53:54.433823 [INFO] Executing instruction: Clean up Docker
2022/01/27 15:53:54.433842 [INFO] Running command /bin/sh -c docker ps -aq
2022/01/27 15:53:54.638602 [INFO] 3771e61e64ae
2022/01/27 15:53:54.638644 [INFO] Running command /bin/sh -c docker images | sed 1d
2022/01/27 15:53:54.810723 [INFO] aws_beanstalk/current-app latest af10382f81a4 13 seconds ago 597MB
adafe645300e 24 seconds ago 732MB
openjdk 8 3bc5f7759e81 30 hours ago 526MB
maven 3.8.1-jdk-8 498ac51e5e6e 6 months ago 525MB
2022/01/27 15:53:54.810767 [INFO] save docker tag command: docker tag af10382f81a4 aws_beanstalk/current-app:latest
2022/01/27 15:53:54.810772 [INFO] save docker tag command: docker tag adafe645300e :
2022/01/27 15:53:54.810776 [INFO] save docker tag command: docker tag 3bc5f7759e81 openjdk:8
2022/01/27 15:53:54.810781 [INFO] save docker tag command: docker tag 498ac51e5e6e maven:3.8.1-jdk-8
2022/01/27 15:53:54.810793 [INFO] Running command /bin/sh -c docker rm `docker ps -aq`
2022/01/27 15:53:54.964217 [INFO] Running command /bin/sh -c docker rmi `docker images -aq`
2022/01/27 15:53:56.249352 [INFO] Deleted: sha256:adafe645300e41dd29b04abccf86a562ad5e635bd6afff9343b6a45721fb3a45
Deleted: sha256:b78c0f45b590e7c8c496466450e2fecf2e31044dd53bcf8d9c64a9e7a8c84139
Deleted: sha256:16aedb83589da925c19d2f692234a2a36c017b35846c07fd8ad6817cceda6ae9
Deleted: sha256:a568ba4507a603b7ace044d64726daaf3022c817cc9550779d64dbb95d0e1e5d
Deleted: sha256:fe90a30920d18ecad75ec02e8c04894fbcaadc209529c3e5c14fdaa66d3a7bc9
Deleted: sha256:7c72fe5e2da958b5d44267aa9de538c274e70125c902bc3e663af4c5c87280dc
Untagged: maven:3.8.1-jdk-8
Untagged: maven@sha256:cba6d738a97e81e8845d60ee2662f020385d01d6135a2cf75bc1f5a84980ef88
Deleted: sha256:498ac51e5e6e99ae8646d007ed554587a4ceeab78a664dc7eedde7137c658e9e
Deleted: sha256:de026bec49cbc1fd7bd1bd7aa03d544713985e39bc0a913f4c0a59dbcc556715
Deleted: sha256:f5c45a5e495b035f37dc2e19d8ead0458cf0ad8b83d5573cc9b4016ea54814b6
Deleted: sha256:9f871694bb9a37f62b6baf12760480448d46e008c8c85f06dab5340b16d11a2b
Deleted: sha256:19a57d2c318dfeac5de4cac0a5263af560eff01c620100570c83658e12df0a87
Deleted: sha256:bc20a3f84b95792033865bff3c1cc53b060108ef2018b1913da3c8eddda77b99
Deleted: sha256:f33d6ed931ff64c63168af00c7544d148d01fda66831246572ff2bfcacbcf2d6
Deleted: sha256:017b9704876de2443b332b1dfec580d365184b514eb0af43f1d59637e77af9bb
Deleted: sha256:98fc59c935e697d6375f05f4fa29d0e1ef7e8ece61aed109056926983ada0ef4
Deleted: sha256:c21ff68b02e7caf277f5d356e8b323a95e8d3969dd1ab0d9f60e7c8b4a01c874
Deleted: sha256:afa3e488a0ee76983343f8aa759e4b7b898db65b715eb90abc81c181388374e3
2022/01/27 15:53:56.249384 [INFO] restore docker image name with command: docker tag af10382f81a4 aws_beanstalk/current-app:latest
2022/01/27 15:53:56.249393 [INFO] Running command /bin/sh -c docker tag af10382f81a4 aws_beanstalk/current-app:latest
2022/01/27 15:53:56.352957 [INFO] restore docker image name with command: docker tag adafe645300e :
2022/01/27 15:53:56.352988 [INFO] Running command /bin/sh -c docker tag adafe645300e :
2022/01/27 15:53:56.360403 [INFO] restore docker image name with command: docker tag 3bc5f7759e81 openjdk:8
2022/01/27 15:53:56.360437 [INFO] Running command /bin/sh -c docker tag 3bc5f7759e81 openjdk:8
2022/01/27 15:53:56.461652 [INFO] restore docker image name with command: docker tag 498ac51e5e6e maven:3.8.1-jdk-8
2022/01/27 15:53:56.461677 [INFO] Running command /bin/sh -c docker tag 498ac51e5e6e maven:3.8.1-jdk-8
2022/01/27 15:53:56.561836 [INFO] Executing instruction: start X-Ray
2022/01/27 15:53:56.561859 [INFO] X-Ray is not enabled.
2022/01/27 15:53:56.561863 [INFO] Executing instruction: configureSqsd
2022/01/27 15:53:56.561868 [INFO] This is a web server environment instance, skip configure sqsd daemon ...
2022/01/27 15:53:56.561871 [INFO] Executing instruction: startSqsd
2022/01/27 15:53:56.561874 [INFO] This is a web server environment instance, skip start sqsd daemon ...
2022/01/27 15:53:56.561877 [INFO] Executing instruction: Track pids in healthd
2022/01/27 15:53:56.561881 [INFO] This is an enhanced health env...
2022/01/27 15:53:56.561891 [INFO] Running command /bin/sh -c systemctl show -p ConsistsOf aws-eb.target | cut -d= -f2
2022/01/27 15:53:56.572170 [INFO] cfn-hup.service docker.service nginx.service healthd.service eb-docker-log.service eb-docker-events.service eb-docker.service
2022/01/27 15:53:56.572206 [INFO] Running command /bin/sh -c systemctl show -p ConsistsOf eb-app.target | cut -d= -f2
2022/01/27 15:53:56.583143 [INFO]
2022/01/27 15:53:56.583747 [INFO] Executing instruction: Configure Docker Container Logging
2022/01/27 15:53:56.587182 [INFO] Executing instruction: RunAppDeployPostDeployHooks
2022/01/27 15:53:56.587200 [INFO] The dir .platform/hooks/postdeploy/ does not exist in the application. Skipping this step...
2022/01/27 15:53:56.587204 [INFO] Executing cleanup logic
2022/01/27 15:53:56.587325 [INFO] CommandService Response: {"status":"SUCCESS","api_version":"1.0","results":[{"status":"SUCCESS","msg":"Engine execution has succeeded.","returncode":0,"events":[{"msg":"Instance deployment completed successfully.","timestamp":1643298836,"severity":"INFO"}]}]}
2022/01/27 15:53:56.587458 [INFO] Platform Engine finished execution on command: app-deploy
2022/01/27 15:56:08.141406 [INFO] Starting...
2022/01/27 15:56:08.141500 [INFO] Starting EBPlatform-PlatformEngine
2022/01/27 15:56:08.141523 [INFO] reading event message file
2022/01/27 15:56:08.141619 [INFO] no eb envtier info file found, skip loading env tier info.
2022/01/27 15:56:08.141697 [INFO] Engine received EB command cfn-hup-exec
2022/01/27 15:56:08.291283 [INFO] Running command /bin/sh -c /opt/aws/bin/cfn-get-metadata -s arn:aws:cloudformation:us-east-1:796071762232:stack/awseb-e-zzq77xp3px-stack/a072a330-7f88-11ec-8245-125e3f27604f -r AWSEBAutoScalingGroup --region us-east-1
2022/01/27 15:56:08.851246 [INFO] Running command /bin/sh -c /opt/aws/bin/cfn-get-metadata -s arn:aws:cloudformation:us-east-1:796071762232:stack/awseb-e-zzq77xp3px-stack/a072a330-7f88-11ec-8245-125e3f27604f -r AWSEBBeanstalkMetadata --region us-east-1
2022/01/27 15:56:09.238835 [INFO] checking whether command tail-log is applicable to this instance...
2022/01/27 15:56:09.238847 [INFO] this command is applicable to the instance, thus instance should execute command
2022/01/27 15:56:09.238849 [INFO] Engine command: (tail-log)
2022/01/27 15:56:09.238906 [INFO] Executing instruction: GetTailLogs
2022/01/27 15:56:09.238910 [INFO] Tail Logs...
2022/01/27 15:56:09.239208 [INFO] Running command /bin/sh -c tail -n 100 /var/log/eb-engine.log
----------------------------------------
/var/log/nginx/access.log
----------------------------------------
172.31.35.54 - - [27/Jan/2022:15:53:59 +0000] "\x16\x03\x01\x00\xA3\x01\x00\x00\x9F\x03\x03\x82\x02\x92T\xC0\x06O\x7F\xAA\xB5=\xC8\x8Ca\x83v\xFF\xF7\x8E\xF2\xB9\xBDW\x1B\xB9\x9A\x91x\xB0\x81\xBF\xA6\x00\x00&\xC0+\xC0/\xC0#\xC0'\xC0\x09\xC0\x13\xC0,\xC00\xC0$\xC0(\xC0\x14\xC0" 400 157 "-" "-" "-"
172.31.35.54 - - [27/Jan/2022:15:54:14 +0000] "\x16\x03\x01\x00\xA3\x01\x00\x00\x9F\x03\x03\xBAy5)=k\x1D\x19|\xF6\xBC\xB0B\x10\x0B$\xE8#\x06\x8B\xA1iY\xB4@@+-\x1F\xAC\x92&\x00\x00&\xC0+\xC0/\xC0#\xC0'\xC0\x09\xC0\x13\xC0,\xC00\xC0$\xC0(\xC0\x14\xC0" 400 157 "-" "-" "-"
172.31.35.54 - - [27/Jan/2022:15:54:29 +0000] "\x16\x03\x01\x00\xA3\x01\x00\x00\x9F\x03\x03\x03\xBC\xF2\x93\x90uW\xC0\xA5f\xFFWz~K_\xF61\xAEsuY\xE2R\xE0\xBC&\xE7\xFB|\xDB\xC2\x00\x00&\xC0+\xC0/\xC0#\xC0'\xC0\x09\xC0\x13\xC0,\xC00\xC0$\xC0(\xC0\x14\xC0" 400 157 "-" "-" "-"
172.31.35.54 - - [27/Jan/2022:15:54:44 +0000] "\x16\x03\x01\x00\xA3\x01\x00\x00\x9F\x03\x03\x84\xFD\xD5\xA5{\xF7\xDEr\x96\xEB" 400 157 "-" "-" "-"
172.31.35.54 - - [27/Jan/2022:15:54:59 +0000] "\x16\x03\x01\x00\xA3\x01\x00\x00\x9F\x03\x03\xBCU\xC9\x92=\xCBT\xC2\xB8RL\xA3\xF7\xE6\xD4s\xB8!A\xF2\x14\xC3" 400 157 "-" "-" "-"
172.31.85.167 - - [27/Jan/2022:15:55:09 +0000] "\x16\x03\x01\x00\xA3\x01\x00\x00\x9F\x03\x03f\x1B\xB8\x17\x19k|H\x1DW\xEF&\x83\x03#\xE9GB\xE8f\xB4\xDAGJ]\x8E\x92\xD6\xC8L\xD3%\x00\x00&\xC0+\xC0/\xC0#\xC0'\xC0\x09\xC0\x13\xC0,\xC00\xC0$\xC0(\xC0\x14\xC0" 400 157 "-" "-" "-"
172.31.35.54 - - [27/Jan/2022:15:55:14 +0000] "\x16\x03\x01\x00\xA3\x01\x00\x00\x9F\x03\x03\xCC\x9D\x1A5&\x99\xB76\x16\xC1\xE2\xB5\xC3:G]\x1A\xA5H\xEE\xF6s\xD0\xF9s\xA3\xBE\xD2\x9Aq\xF0\xC2\x00\x00&\xC0+\xC0/\xC0#\xC0'\xC0\x09\xC0\x13\xC0,\xC00\xC0$\xC0(\xC0\x14\xC0" 400 157 "-" "-" "-"
172.31.85.167 - - [27/Jan/2022:15:55:24 +0000] "\x16\x03\x01\x00\xA3\x01\x00\x00\x9F\x03\x03j4x\xF0\x86uwh\x1C\xEEg8\xA9\xA3\x1E(\x18C\x96\xFA\xE8\xA6\x87{\xC3N\xD4\x08\x10\xBA\xAC\x03\x00\x00&\xC0+\xC0/\xC0#\xC0'\xC0\x09\xC0\x13\xC0,\xC00\xC0$\xC0(\xC0\x14\xC0" 400 157 "-" "-" "-"
172.31.35.54 - - [27/Jan/2022:15:55:29 +0000] "\x16\x03\x01\x00\xA3\x01\x00\x00\x9F\x03\x03\x5C\x8Btq\xBEG\xD2\xF8l\xC8\xBA\x94F\x14\x8F\x1C\xCC\xA1@JSw9\xE4\xCD\xA7\x05\x82\xE4][\xB8\x00\x00&\xC0+\xC0/\xC0#\xC0'\xC0\x09\xC0\x13\xC0,\xC00\xC0$\xC0(\xC0\x14\xC0" 400 157 "-" "-" "-"
172.31.85.167 - - [27/Jan/2022:15:55:39 +0000] "\x16\x03\x01\x00\xA3\x01\x00\x00\x9F\x03\x03{\x05\x86\x89\x09.:A\x0C\xCF\x14\xA4=\xDF\xFA\xC6\xD4\xF5+\x9D\xA4\xF8\x93\xE9k\xD5\xD3\xC5\xCA\x9C\xFB\x15\x00\x00&\xC0+\xC0/\xC0#\xC0'\xC0\x09\xC0\x13\xC0,\xC00\xC0$\xC0(\xC0\x14\xC0" 400 157 "-" "-" "-"
172.31.35.54 - - [27/Jan/2022:15:55:44 +0000] "\x16\x03\x01\x00\xA3\x01\x00\x00\x9F\x03\x03\xBC\xF3\xE3\xDEy\xB3(\xF2\x18\xEB\xC5f\x1F\xA2\xF5\xE6\xF5\x8C\xF6lO\x98D\xFAT\xCB\xB3`\x9C\xC2\xCE.\x00\x00&\xC0+\xC0/\xC0#\xC0'\xC0\x09\xC0\x13\xC0,\xC00\xC0$\xC0(\xC0\x14\xC0" 400 157 "-" "-" "-"
172.31.85.167 - - [27/Jan/2022:15:55:54 +0000] "\x16\x03\x01\x00\xA3\x01\x00\x00\x9F\x03\x03\x16P\x10\x07}\x90\xBD!\x9E\xA1\xAB\xD9\xDD\x1F\xAA\xBF\x85u\xCF\xE7\xAD\xA9\x93$q\xC4" 400 157 "-" "-" "-"
172.31.35.54 - - [27/Jan/2022:15:55:59 +0000] "\x16\x03\x01\x00\xA3\x01\x00\x00\x9F\x03\x03x\x94z\x84\x1Buz3\x9A\x8FbX\x07\x13\x00\x8DH\xDFf\x10\xC9\xE7\xDB\xF7\xE7\xBFr\xE8w>\xFC\x9E\x00\x00&\xC0+\xC0/\xC0#\xC0'\xC0\x09\xC0\x13\xC0,\xC00\xC0$\xC0(\xC0\x14\xC0" 400 157 "-" "-" "-"
172.31.85.167 - - [27/Jan/2022:15:56:09 +0000] "\x16\x03\x01\x00\xA3\x01\x00\x00\x9F\x03\x03\xEF\x1F'\x84@\xF4\xF4\xB6C\xEE\xE4}\xD6E\x94\x05\xA1\x1B*\x1EZ\x94N\xB9K\x96A>\x8A\x8Ep\xBF\x00\x00&\xC0+\xC0/\xC0#\xC0'\xC0\x09\xC0\x13\xC0,\xC00\xC0$\xC0(\xC0\x14\xC0" 400 157 "-" "-" "-"
----------------------------------------
/var/log/nginx/error.log
----------------------------------------
----------------------------------------
/var/log/docker-events.log
----------------------------------------
2022-01-27T15:52:46.764393026Z image pull maven:3.8.1-jdk-8 (name=maven)
2022-01-27T15:52:47.730944524Z container create b83331900dd580a01b9c5e2744412bd6f6e4465313177fb45a2f288d70765010 (image=sha256:16aedb83589da925c19d2f692234a2a36c017b35846c07fd8ad6817cceda6ae9, name=inspiring_tesla)
2022-01-27T15:52:47.731203832Z container attach b83331900dd580a01b9c5e2744412bd6f6e4465313177fb45a2f288d70765010 (image=sha256:16aedb83589da925c19d2f692234a2a36c017b35846c07fd8ad6817cceda6ae9, name=inspiring_tesla)
2022-01-27T15:52:47.784204703Z network connect 38cc920306e67474a0e4c1558a074911f27746d82bcaf75a013b36aa57d583d3 (container=b83331900dd580a01b9c5e2744412bd6f6e4465313177fb45a2f288d70765010, name=bridge, type=bridge)
2022-01-27T15:52:48.320837501Z container start b83331900dd580a01b9c5e2744412bd6f6e4465313177fb45a2f288d70765010 (image=sha256:16aedb83589da925c19d2f692234a2a36c017b35846c07fd8ad6817cceda6ae9, name=inspiring_tesla)
2022-01-27T15:53:28.504262431Z container die b83331900dd580a01b9c5e2744412bd6f6e4465313177fb45a2f288d70765010 (exitCode=0, image=sha256:16aedb83589da925c19d2f692234a2a36c017b35846c07fd8ad6817cceda6ae9, name=inspiring_tesla)
2022-01-27T15:53:28.615767036Z network disconnect 38cc920306e67474a0e4c1558a074911f27746d82bcaf75a013b36aa57d583d3 (container=b83331900dd580a01b9c5e2744412bd6f6e4465313177fb45a2f288d70765010, name=bridge, type=bridge)
2022-01-27T15:53:30.828196270Z container destroy b83331900dd580a01b9c5e2744412bd6f6e4465313177fb45a2f288d70765010 (image=sha256:16aedb83589da925c19d2f692234a2a36c017b35846c07fd8ad6817cceda6ae9, name=inspiring_tesla)
2022-01-27T15:53:40.412059108Z image pull openjdk:8 (name=openjdk)
2022-01-27T15:53:41.682562011Z container create ebb956fca825c2053c41bce28fb0a802ab2f3ef344bdeb14f821a7577c284138 (image=sha256:2ab20532670b7570e512ec955536dfa5e246c374bdca4f0494df107b88a51c75, name=stoic_fermi)
2022-01-27T15:53:41.807749332Z container destroy ebb956fca825c2053c41bce28fb0a802ab2f3ef344bdeb14f821a7577c284138 (image=sha256:2ab20532670b7570e512ec955536dfa5e246c374bdca4f0494df107b88a51c75, name=stoic_fermi)
2022-01-27T15:53:41.854905318Z container create 28814d73d5d71c7f3cd97d31e3745db7c8d74c7f41a1369d86a6ac94540ff54c (image=sha256:8020ea63973791b37416e569141e448a047578432cc73771afc09069d4a0f99c, name=awesome_ritchie)
2022-01-27T15:53:41.972362390Z container destroy 28814d73d5d71c7f3cd97d31e3745db7c8d74c7f41a1369d86a6ac94540ff54c (image=sha256:8020ea63973791b37416e569141e448a047578432cc73771afc09069d4a0f99c, name=awesome_ritchie)
2022-01-27T15:53:41.978868467Z image tag sha256:af10382f81a47247f3194b007fe0b95c08b2a68c7d9f8f4118741b00121ee217 (name=aws_beanstalk/staging-app:latest)
2022-01-27T15:53:46.962572822Z container create 3771e61e64aec3296f70d863c3deeae6e33d57184feecc1297665eee4630c399 (image=af10382f81a4, name=dreamy_napier)
2022-01-27T15:53:47.000564620Z network connect 38cc920306e67474a0e4c1558a074911f27746d82bcaf75a013b36aa57d583d3 (container=3771e61e64aec3296f70d863c3deeae6e33d57184feecc1297665eee4630c399, name=bridge, type=bridge)
2022-01-27T15:53:47.520980591Z container start 3771e61e64aec3296f70d863c3deeae6e33d57184feecc1297665eee4630c399 (image=af10382f81a4, name=dreamy_napier)
2022-01-27T15:53:53.482805850Z image tag sha256:af10382f81a47247f3194b007fe0b95c08b2a68c7d9f8f4118741b00121ee217 (name=aws_beanstalk/current-app:latest)
2022-01-27T15:53:53.562121224Z image untag sha256:af10382f81a47247f3194b007fe0b95c08b2a68c7d9f8f4118741b00121ee217 (name=sha256:af10382f81a47247f3194b007fe0b95c08b2a68c7d9f8f4118741b00121ee217)
2022-01-27T15:53:55.349273944Z image delete sha256:adafe645300e41dd29b04abccf86a562ad5e635bd6afff9343b6a45721fb3a45 (name=sha256:adafe645300e41dd29b04abccf86a562ad5e635bd6afff9343b6a45721fb3a45)
2022-01-27T15:53:55.351988220Z image delete sha256:16aedb83589da925c19d2f692234a2a36c017b35846c07fd8ad6817cceda6ae9 (name=sha256:16aedb83589da925c19d2f692234a2a36c017b35846c07fd8ad6817cceda6ae9)
2022-01-27T15:53:55.356884258Z image delete sha256:fe90a30920d18ecad75ec02e8c04894fbcaadc209529c3e5c14fdaa66d3a7bc9 (name=sha256:fe90a30920d18ecad75ec02e8c04894fbcaadc209529c3e5c14fdaa66d3a7bc9)
2022-01-27T15:53:55.374500965Z image untag sha256:498ac51e5e6e99ae8646d007ed554587a4ceeab78a664dc7eedde7137c658e9e (name=sha256:498ac51e5e6e99ae8646d007ed554587a4ceeab78a664dc7eedde7137c658e9e)
2022-01-27T15:53:55.376309688Z image untag sha256:498ac51e5e6e99ae8646d007ed554587a4ceeab78a664dc7eedde7137c658e9e (name=sha256:498ac51e5e6e99ae8646d007ed554587a4ceeab78a664dc7eedde7137c658e9e)
2022-01-27T15:53:56.244254893Z image delete sha256:498ac51e5e6e99ae8646d007ed554587a4ceeab78a664dc7eedde7137c658e9e (name=sha256:498ac51e5e6e99ae8646d007ed554587a4ceeab78a664dc7eedde7137c658e9e)
2022-01-27T15:53:56.345382037Z image tag sha256:af10382f81a47247f3194b007fe0b95c08b2a68c7d9f8f4118741b00121ee217 (name=aws_beanstalk/current-app:latest)
2022-01-27T15:53:56.458746013Z image tag sha256:3bc5f7759e81182b118ab4d74087103d3733483ea37080ed5b6581251d326713 (name=openjdk:8)
----------------------------------------
/var/log/eb-docker-process.log
----------------------------------------
2022/01/27 15:53:53.917760 [INFO] Loading Manifest...
2022/01/27 15:53:53.917884 [INFO] no eb envtier info file found, skip loading env tier info.
2022/01/27 15:53:53.943756 [INFO] Running command /bin/sh -c /opt/aws/bin/cfn-get-metadata -s arn:aws:cloudformation:us-east-1:796071762232:stack/awseb-e-zzq77xp3px-stack/a072a330-7f88-11ec-8245-125e3f27604f -r AWSEBAutoScalingGroup --region us-east-1
2022/01/27 15:53:57.965132 [INFO] Running command /bin/sh -c /opt/aws/bin/cfn-get-metadata -s arn:aws:cloudformation:us-east-1:796071762232:stack/awseb-e-zzq77xp3px-stack/a072a330-7f88-11ec-8245-125e3f27604f -r AWSEBBeanstalkMetadata --region us-east-1
2022/01/27 15:53:58.364393 [INFO] Checking if docker is running...
2022/01/27 15:53:58.364409 [INFO] Fetch current app container id...
2022/01/27 15:53:58.364434 [INFO] Running command /bin/sh -c docker ps | grep 3771e61e64ae
2022/01/27 15:53:58.402972 [INFO] 3771e61e64ae af10382f81a4 "java -jar /usr/loca…" 12 seconds ago Up 10 seconds 5000/tcp dreamy_napier
2022/01/27 15:53:58.402996 [INFO] Running command /bin/sh -c docker wait 3771e61e64ae
----------------------------------------
/var/log/docker
----------------------------------------
Jan 27 15:50:41 ip-172-31-85-60 docker: time="2022-01-27T15:50:41.206815429Z" level=info msg="Starting up"
Jan 27 15:50:41 ip-172-31-85-60 docker: time="2022-01-27T15:50:41.251734173Z" level=info msg="parsed scheme: \"unix\"" module=grpc
Jan 27 15:50:41 ip-172-31-85-60 docker: time="2022-01-27T15:50:41.251769208Z" level=info msg="scheme \"unix\" not registered, fallback to default scheme" module=grpc
Jan 27 15:50:41 ip-172-31-85-60 docker: time="2022-01-27T15:50:41.251794146Z" level=info msg="ccResolverWrapper: sending update to cc: {[{unix:///run/containerd/containerd.sock 0 }] }" module=grpc
Jan 27 15:50:41 ip-172-31-85-60 docker: time="2022-01-27T15:50:41.251813620Z" level=info msg="ClientConn switching balancer to \"pick_first\"" module=grpc
Jan 27 15:50:41 ip-172-31-85-60 docker: time="2022-01-27T15:50:41.273290447Z" level=info msg="parsed scheme: \"unix\"" module=grpc
Jan 27 15:50:41 ip-172-31-85-60 docker: time="2022-01-27T15:50:41.273327673Z" level=info msg="scheme \"unix\" not registered, fallback to default scheme" module=grpc
Jan 27 15:50:41 ip-172-31-85-60 docker: time="2022-01-27T15:50:41.273364441Z" level=info msg="ccResolverWrapper: sending update to cc: {[{unix:///run/containerd/containerd.sock 0 }] }" module=grpc
Jan 27 15:50:41 ip-172-31-85-60 docker: time="2022-01-27T15:50:41.273386710Z" level=info msg="ClientConn switching balancer to \"pick_first\"" module=grpc
Jan 27 15:50:41 ip-172-31-85-60 docker: time="2022-01-27T15:50:41.465282859Z" level=info msg="Loading containers: start."
Jan 27 15:50:41 ip-172-31-85-60 docker: time="2022-01-27T15:50:41.956009883Z" level=info msg="Default bridge (docker0) is assigned with an IP address 172.17.0.0/16. Daemon option --bip can be used to set a preferred IP address"
Jan 27 15:50:42 ip-172-31-85-60 docker: time="2022-01-27T15:50:42.186887273Z" level=info msg="Loading containers: done."
Jan 27 15:50:42 ip-172-31-85-60 docker: time="2022-01-27T15:50:42.641490298Z" level=info msg="Docker daemon" commit=b0f5bc3 graphdriver(s)=overlay2 version=20.10.7
Jan 27 15:50:42 ip-172-31-85-60 docker: time="2022-01-27T15:50:42.643174227Z" level=info msg="Daemon has completed initialization"
Jan 27 15:50:42 ip-172-31-85-60 docker: time="2022-01-27T15:50:42.702629222Z" level=info msg="API listen on /run/docker.sock"
Jan 27 15:53:28 ip-172-31-85-60 docker: time="2022-01-27T15:53:28.503145956Z" level=info msg="ignoring event" container=b83331900dd580a01b9c5e2744412bd6f6e4465313177fb45a2f288d70765010 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Jan 27 15:53:41 ip-172-31-85-60 docker: time="2022-01-27T15:53:41.783532791Z" level=info msg="Layer sha256:e963a094d3f25a21ce0bfcae0216d04385c4c06ad580c73675a7992627c28416 cleaned up"
Jan 27 15:53:41 ip-172-31-85-60 docker: time="2022-01-27T15:53:41.948756315Z" level=info msg="Layer sha256:e963a094d3f25a21ce0bfcae0216d04385c4c06ad580c73675a7992627c28416 cleaned up"
----------------------------------------
/var/log/eb-docker/containers/eb-current-app/eb-3771e61e64ae-stdouterr.log
----------------------------------------
. ____ _ __ _ _
/\\ / ___'_ __ _ _(_)_ __ __ _ \ \ \ \
( ( )\___ | '_ | '_| | '_ \/ _` | \ \ \ \
\\/ ___)| |_)| | | | | || (_| | ) ) ) )
' |____| .__|_| |_|_| |_\__, | / / / /
=========|_|==============|___/=/_/_/_/
:: Spring Boot :: (v2.5.6)
2022-01-27 15:53:57.807 INFO 3771e61e64ae --- [ main] o.s.b.a.e.w.EndpointLinksResolver : Exposing 1 endpoint(s) beneath base path '/actuator'
2022-01-27 15:53:57.853 INFO 3771e61e64ae --- [ main] o.a.c.h.Http11NioProtocol : Starting ProtocolHandler ["http-nio-5000"]
2022-01-27 15:53:57.875 INFO 3771e61e64ae --- [ main] o.s.b.w.e.t.TomcatWebServer : Tomcat started on port(s): 5000 (http) with context path ''
2022-01-27 15:53:57.903 INFO 3771e61e64ae --- [ main] c.n.p.ParalleniumHostApplication : Started ParalleniumHostApplication in 8.805 seconds (JVM running for 10.386)
2022-01-27 15:53:57.939 INFO 3771e61e64ae --- [ main] c.n.p.ParalleniumHostApplication : **The server is hosted at: 127.0.0.1:5000 with a PUBLIC ip of 34.226.166.24
2022-01-27 15:53:57.941 INFO 3771e61e64ae --- [ main] c.n.p.ParalleniumHostApplication : Spring version is 5.3.12
2022-01-27 15:53:57.946 INFO 3771e61e64ae --- [ main] c.n.p.ParalleniumHostApplication : Socket Server is listening on port 6868...
ANSWER
Answered 2022-Jan-27 at 17:18
Okay, so I decided to just launch a new environment using the exact same configuration and code, and it worked. It looks like Elastic Beanstalk environments can break, and once that happens there is apparently no fixing them.
QUESTION
I am trying to solve a BVP problem (Cosserat rod ODE) with gekko. The goal is to find the initial conditions nsol and msol (which correspond to the internal forces and moments of the rod) that minimize the cost function (the position of the final point of the rod). When integrating, the Cosserat equations give us P, R, nsol, msol, which correspond to the position, orientation, internal forces and moments in a section of the rod.
But I keep getting this error:
Exception: @error: Equation Definition Equation without an equality (=) or inequality (>,<) false STOPPING...
I am a beginner with gekko, and although I have seen multiple threads with the same error, the source of the error seems to be different every time. Could anyone please point me in the right direction? Thank you very much.
import numpy as np
import math
from scipy import integrate
import matplotlib.pyplot as plt
from gekko import GEKKO
E = 200e7
nu = 0.3
G = E/(2*(1+nu))
r = 0.01
rho = 8000
g = np.array([0, 0, 0])
ray = 1
A = np.pi*r**2
I = (np.pi*r**4)/4
J = 2*I
L = 1
Lfin = 1.5
Kse = np.diag([G*A, G*A, E*A])
Kbt = np.diag([E*I, E*I, G*J])
def antisym(y):
    AS = np.array([[0, -y[2], y[1]], [y[2], 0, -y[0]], [-y[1], y[0], 0]])
    return AS
m = GEKKO()
dl = 81
m.time = np.linspace(0, L, dl)
# Parameters
R = m.Array(m.Var, (3,3))
P = m.Array(m.Var, (3))
R[0,0].value = 1
R[1,1].value = 1
R[2,2].value = 1
R[0,1].value = 0
R[0,2].value = 0
R[1,0].value = 0
R[1,2].value = 0
R[2,0].value = 0
R[2,1].value = 0
P[0].value = 0
P[1].value = 0
P[2].value = 0
#R = m.Array(m.Var, (3,3),lb=0,ub=1, value = np.eye(3))
#P = m.Array(m.Var, (3), value = np.zeros(3))
v = m.Array(m.Var, (3))
u = m.Array(m.Var, (3))
# Variables
nsol = m.Array(m.Var, (3), value = 0)
msol = m.Array(m.Var, (3), value = 0)
test = np.zeros(dl)
test[-1] = 1.0
final = m.Param(value = test)
# Equations
m.Equation(v == np.dot(np.dot(np.diag((1/(G*A), 1/(G*A), 1/(E*A))), np.transpose(R)), nsol) + np.array([0,0,1]))
m.Equation(u == np.dot(np.dot(np.diag((1/(E*I), 1/(E*I), 1/(G*J))), np.transpose(R)), msol) + np.array([0,0,0]))
for i in range(2):
    m.Equation(P[i].dt() == np.dot(R[i, :], v))
for i in range(2):
    for j in range(2):
        m.Equation(R[i, j].dt() == np.dot(R[i, :], antisym(u)[:, j]))
for i in range(2):
    m.Equation(nsol[i].dt() == 0)
m.Equation(msol[0].dt() == -(P[1].dt()*nsol[2]-P[2].dt()*nsol[1]))
m.Equation(msol[1].dt() == -(P[2].dt()*nsol[0]-P[0].dt()*nsol[2]))
m.Equation(msol[2].dt() == -(P[0].dt()*nsol[1]-P[1].dt()*nsol[0]))
# Objective
m.Minimize(P[2]*final - Lfin)
m.options.IMODE = 6
m.solve()
ANSWER
Answered 2021-Dec-22 at 13:36
One way to troubleshoot these types of errors is to inspect the gk0_model.apm
model file in the run directory m.path
. I modified the code to open the folder with m.open_folder()
and the apm
file:
Model
Parameters
p1
End Parameters
Variables
v1 = 1
v2 = 0
v3 = 0
v4 = 0
v5 = 1
v6 = 0
v7 = 0
v8 = 0
v9 = 1
v10 = 0
v11 = 0
v12 = 0
v13 = 0
v14 = 0
v15 = 0
v16 = 0
v17 = 0
v18 = 0
v19 = 0
v20 = 0
v21 = 0
v22 = 0
v23 = 0
v24 = 0
End Variables
Equations
False
False
$v10=((((v1)*(v13))+((v2)*(v14)))+((v3)*(v15)))
$v11=((((v4)*(v13))+((v5)*(v14)))+((v6)*(v15)))
$v1=((((v1)*(0))+((v2)*(v18)))+((v3)*((-v17))))
$v2=((((v1)*((-v18)))+((v2)*(0)))+((v3)*(v16)))
$v4=((((v4)*(0))+((v5)*(v18)))+((v6)*((-v17))))
$v5=((((v4)*((-v18)))+((v5)*(0)))+((v6)*(v16)))
$v19=0
$v20=0
$v22=(-((($v11)*(v21))-(($v12)*(v20))))
$v23=(-((($v12)*(v19))-(($v10)*(v21))))
$v24=(-((($v10)*(v20))-(($v11)*(v19))))
minimize (((v12)*(p1))-1.5)
End Equations
End Model
The first two equations are listed as False
. This means that Python evaluated the ==
as a comparative statement rather than building a symbolic expression. Gekko symbolic expressions are needed to compile the model into byte-code for automatic differentiation. In this case, the equations:
m.Equation(v == np.dot(np.dot(np.diag((1/(G*A), 1/(G*A), 1/(E*A))),\
np.transpose(R)), nsol) + np.array([0,0,1]))
m.Equation(u == np.dot(np.dot(np.diag((1/(E*I), 1/(E*I), 1/(G*J))),\
np.transpose(R)), msol) + np.array([0,0,0]))
are vectors and should be scalars.
# Equations
r1 = np.dot(np.dot(np.diag((1/(G*A), 1/(G*A), 1/(E*A))), \
np.transpose(R)), nsol) + np.array([0,0,1])
r2 = np.dot(np.dot(np.diag((1/(E*I), 1/(E*I), 1/(G*J))), \
np.transpose(R)), msol) + np.array([0,0,0])
for i in range(3):
    m.Equation(v[i] == r1[i])
    m.Equation(u[i] == r2[i])
This gives an unbounded solution error when attempting to solve. Adding a lower bound of -1000
and upper bound of 1000
for all variables gives a successful solution. If variables at at the bound, it may indicate that the problem is over-specified or unbounded without the artificial bounds.
import numpy as np
import math
from scipy import integrate
import matplotlib.pyplot as plt
from gekko import GEKKO
E = 200e7
nu = 0.3
G = E/(2*(1+nu))
r = 0.01
rho = 8000
g = np.array([0, 0, 0])
ray = 1
A = np.pi*r**2
I = (np.pi*r**4)/4
J = 2*I
L = 1
Lfin = 1.5
Kse = np.diag([G*A, G*A, E*A])
Kbt = np.diag([E*I, E*I, G*J])
def antisym(y):
    AS = np.array([[0, -y[2], y[1]], [y[2], 0, -y[0]], [-y[1], y[0], 0]])
    return AS
m = GEKKO()
dl = 81
m.time = np.linspace(0, L, dl)
# Parameters
R = m.Array(m.Var, (3,3), lb=-1000, ub=1000)
P = m.Array(m.Var, (3), lb=-1000, ub=1000)
R[0,0].value = 1
R[1,1].value = 1
R[2,2].value = 1
R[0,1].value = 0
R[0,2].value = 0
R[1,0].value = 0
R[1,2].value = 0
R[2,0].value = 0
R[2,1].value = 0
P[0].value = 0
P[1].value = 0
P[2].value = 0
#R = m.Array(m.Var, (3,3),lb=0,ub=1, value = np.eye(3))
#P = m.Array(m.Var, (3), value = np.zeros(3))
v = m.Array(m.Var, (3), lb=-1000, ub=1000)
u = m.Array(m.Var, (3), lb=-1000, ub=1000)
# Variables
nsol = m.Array(m.Var, (3), value = 0, lb=-1000, ub=1000)
msol = m.Array(m.Var, (3), value = 0, lb=-1000, ub=1000)
test = np.zeros(dl)
test[-1] = 1.0
final = m.Param(value = test)
# Equations
r1 = np.dot(np.dot(np.diag((1/(G*A), 1/(G*A), 1/(E*A))), \
np.transpose(R)), nsol) + np.array([0,0,1])
r2 = np.dot(np.dot(np.diag((1/(E*I), 1/(E*I), 1/(G*J))), \
np.transpose(R)), msol) + np.array([0,0,0])
for i in range(3):
    m.Equation(v[i]==r1[i])
    m.Equation(u[i]==r2[i])
for i in range(2):
    m.Equation(P[i].dt() == np.dot(R[i, :],v))
for i in range(2):
    for j in range(2):
        m.Equation(R[i, j].dt() == np.dot(R[i, :], antisym(u)[:, j]))
for i in range(2):
    m.Equation(nsol[i].dt() == 0)
m.Equation(msol[0].dt() == -(P[1].dt()*nsol[2]-P[2].dt()*nsol[1]))
m.Equation(msol[1].dt() == -(P[2].dt()*nsol[0]-P[0].dt()*nsol[2]))
m.Equation(msol[2].dt() == -(P[0].dt()*nsol[1]-P[1].dt()*nsol[0]))
# Objective
m.Minimize(P[2]*final - Lfin)
m.options.IMODE = 6
#m.open_folder()
m.solve()
Successful Solution Summary:
iter objective inf_pr inf_du lg(mu) ||d|| lg(rg) alpha_du alpha_pr ls
0 -1.2000000e+02 1.00e+00 1.24e-02 0.0 0.00e+00 - 0.00e+00 0.00e+00 0
1 -6.2000001e+02 4.70e-14 3.40e-01 -3.0 4.00e+04 - 6.60e-01 1.00e+00f 1
2 -1.1150000e+03 8.00e-14 6.43e-04 1.0 5.86e+04 - 1.00e+00 6.76e-01f 1
3 -1.1199121e+03 9.48e-14 3.86e-08 -1.1 3.93e+02 - 9.98e-01 1.00e+00f 1
4 -1.1199991e+03 7.96e-14 2.43e-10 -3.1 6.97e+00 - 9.98e-01 9.99e-01f 1
Reallocating memory for MA57: lfact (156431)
5 -1.1200000e+03 6.50e-14 2.43e-13 -9.0 7.03e-02 - 9.99e-01 9.99e-01f 1
Number of Iterations....: 5
(scaled) (unscaled)
Objective...............: -1.1200000091288521e+03 -1.1200000091288521e+03
Dual infeasibility......: 2.4264487412842937e-13 2.4264487412842937e-13
Constraint violation....: 6.4955110402786716e-14 6.4955110402786716e-14
Complementarity.........: 9.8229036600334927e-07 9.8229036600334927e-07
Overall NLP error.......: 9.8229036600334927e-07 9.8229036600334927e-07
Number of objective function evaluations = 6
Number of objective gradient evaluations = 6
Number of equality constraint evaluations = 6
Number of inequality constraint evaluations = 0
Number of equality constraint Jacobian evaluations = 6
Number of inequality constraint Jacobian evaluations = 0
Number of Lagrangian Hessian evaluations = 5
Total CPU secs in IPOPT (w/o function evaluations) = 0.117
Total CPU secs in NLP function evaluations = 0.181
EXIT: Optimal Solution Found.
The solution was found.
The final value of the objective function is -1120.00000912885
---------------------------------------------------
Solver : IPOPT (v3.12)
Solution time : 0.334799999982351 sec
Objective : -1120.00000000000
Successful solution
---------------------------------------------------
QUESTION
I'm receiving the following error when invoking an AWS SAM Lambda function locally:
Missing AWS Lambda trace data for X-Ray. Ensure Active Tracing is enabled and no subsegments are created outside the function handler.
Below you can see my function:
/** Bootstrap */
require('dotenv').config()
const AWSXRay = require('aws-xray-sdk')
/** Libraries*/
const se = require('serialize-error')
/** Internal */
const logger = require('./src/utils/logger')
const ExecuteService = require('./src/service')
/**
*
*/
exports.handler = async (event) => {
const xraySegement = AWSXRay.getSegment()
const message = process.env.NODE_ENV == 'production' ? JSON.parse(event.Records[0].body) : event
try {
await ExecuteService(message)
} catch (err) {
logger.error({
error: se(err)
})
return err
}
}
In addition, I have Tracing set to Active in my template.yml.
What part of the documentation am I clearly misreading, missing, or reading over?
ANSWER
Answered 2021-Aug-14 at 21:10
For now you can't invoke a SAM Lambda locally with X-Ray because it is not supported yet.
See
- [Feature Request] Add support for X-Ray on SAM Local #217
- aws-lambda-runtime-interface-emulator#level-of-support
The component does not support X-ray and other Lambda integrations locally.
If you don't care about X-Ray and just want your code to work, you can check the env variable AWS_SAM_LOCAL to prevent X-Ray usage:
let AWSXRay
if (!process.env.AWS_SAM_LOCAL) {
AWSXRay = require('aws-xray-sdk')
}
// ...
if (!process.env.AWS_SAM_LOCAL) {
const xraySegement = AWSXRay.getSegment()
}
QUESTION
This question is based on this video on YouTube made with the purpose of reviewing this project.
In the video, the host analyzes the project and finds that the following block of code is a cause of performance issues:
std::optional<HitRecord> HittableObjectList::Hit(const Ray &r, float t_min, float t_max) const {
float closest_so_far = t_max;
return std::accumulate(begin(objects), end(objects), std::optional<HitRecord>{},
[&](const auto &temp_value, const auto &object) {
if(auto temp_hit = object -> Hit(r, t_min, closest_so_far); temp_hit) {
closest_so_far = temp_hit.value().t;
return temp_hit;
}
return temp_value;
});
}
I would assume that the std::accumulate function would behave similarly to a for loop. Unhappy with the performance hit there (and because, for some reason, the profiler wouldn't profile the lambda code [a limitation, perhaps?]), the reviewer changed the code to this:
std::optional<HitRecord> HittableObjectList::Hit(const Ray &r, float t_min, float t_max) const {
float closest_so_far = t_max;
std::optional<HitRecord> record{};
for(size_t i = 0; i < objects.size(); i++) {
const std::shared_ptr &object = objects[i];
if(auto temp_hit = object -> Hit(r, t_min, closest_so_far); temp_hit) {
closest_so_far = temp_hit.value().t;
record = temp_hit;
}
}
return record;
}
With this change the time to completion went from 7 minutes and 30 seconds to 22 seconds.
My questions are:
- Aren't both blocks of code identical? Why does std::accumulate give such an enormous penalty here?
- Would the performance be better if, instead of using autos, the explicit types were used?
The reviewer did mention suggestions such as avoiding the use of std::optionals and std::shared_ptrs here due to the number of calls made, and executing this code on the GPU instead, but for now I'm only interested in the points mentioned earlier.
ANSWER
Answered 2021-Dec-07 at 08:52
Disclaimer: I did not run advanced tests; this is just my analysis based on the video and the code.
From what I see in the profiling in the video, the hotspot in accumulate is here:
_Val = _Reduce_op(_Val, *_UFirst);
Since _Reduce_op is just our lambda, and the profiling shows this lambda is not the bottleneck, the only expensive operation left here is the copy assignment operator =.
Looking at HitRecord:
struct HitRecord {
point3 p;
vec3 normal;
std::shared_ptr mat_ptr;
float t;
bool front_face;
...
We see there is a bunch of stuff including a shared_ptr. Chances are the optimizer would remove the copy when it is not needed if the shared_ptr was not here. Copying a shared_ptr is relatively expensive in a hot loop because it involves atomic operations.
Note that in the profiled accumulate code, we see that they tried to fix this in C++20 by introducing a move.
#if _HAS_CXX20
_Val = _Reduce_op(_STD move(_Val), *_UFirst);
#else // ^^^ _HAS_CXX20 ^^^ // vvv !_HAS_CXX20 vvv
_Val = _Reduce_op(_Val, *_UFirst);
#endif // _HAS_CXX20
Though for this move to work, the compiler would have to properly use the named return value optimization, which it does not always do when there are multiple return statements in a function. You would also have to change the signature of the lambda so that it takes a value or an r-value instead of a reference. Changing from auto to a named type would not fix the issue.
QUESTION
import React, { useState } from "react";
const App = () => {
const anecdotes = [
"If it hurts, do it more often",
"Adding manpower to a late software project makes it later!",
"The first 90 percent of the code accounts for the first 10 percent of the development time...The remaining 10 percent of the code accounts for the other 90 percent of the development time.",
"Any fool can write code that a computer can understand. Good programmers write code that humans can understand.",
"Premature optimization is the root of all evil.",
"Debugging is twice as hard as writing the code in the first place. Therefore, if you write the code as cleverly as possible, you are, by definition, not smart enough to debug it.",
"Programming without an extremely heavy use of console.log is same as if a doctor would refuse to use x-rays or blood tests when diagnosing patients",
];
const [selected, setSelected] = useState(0);
const [votes, setVotes] = useState([0, 0, 0, 0, 0, 0, 0]);
function getRandomInt(max) {
return Math.floor(Math.random() * max);
}
function pickRandomNumber() {
setSelected(getRandomInt(anecdotes.length));
}
function addVote() {
const newVotes = votes;
newVotes[selected] += 1;
setSelected(selected);
setVotes(newVotes);
}
return (
{anecdotes[selected]}
Has {votes[selected]} votes
vote
next anecdote
);
};
export default App;
So I basically have 7 anecdotes, and when I press the vote button it should add a vote. I'm counting that by keeping the votes array and adding one to the selected index. The number does get added by the addVote function, but it doesn't update on the screen; if I skip through to the same anecdote again, it shows just fine. Any idea?
This is the related div that doesn't update:
Has {votes[selected]} votes
ANSWER
Answered 2021-Nov-25 at 00:40
This should fix it (copying the array gives React a new reference, so the state update triggers a re-render; mutating the existing array does not):
function addVote() {
const newVotes = [...votes];
newVotes[selected] += 1;
setSelected(selected);
setVotes(newVotes);
}
You can read this answer for further explanation!
QUESTION
I want to draw a so-called 'horizontal ray' or 'horizontal line' for some y points. I cannot find any option to do so for my charts. I tried using markers or even data labels, but they are irrelevant. I'm currently using the Syncfusion library for drawing charts. Here is the image:
How can I achieve this?
This is the code I'm currently using (Cartesian charts):
import 'dart:math';
import 'package:flutter/material.dart';
import 'package:syncfusion_flutter_charts/charts.dart';
class ChartAl extends StatefulWidget {
@override
_ChartPageState createState() => _ChartPageState();
}
late List chartData;
class _ChartPageState extends State {
late SelectionBehavior _selectionBehavior;
@override
void initState() {
_selectionBehavior = SelectionBehavior(
// Enables the selection
enable: true);
super.initState();
}
@override
Widget build(BuildContext context) {
chartData = getData();
return Container(
child: SfCartesianChart(
primaryYAxis: NumericAxis(),
primaryXAxis: DateTimeAxis(
intervalType: DateTimeIntervalType.days,
visibleMinimum: chartData[chartData.length - 29].x,
visibleMaximum: chartData[chartData.length - 1].x),
zoomPanBehavior: ZoomPanBehavior(
enablePanning: true,
),
series: [
LineSeries(
initialSelectedDataIndexes: [2],
selectionBehavior: SelectionBehavior(
enable: true,
),
// markerSettings: MarkerSettings(
// isVisible: true,
// shape: DataMarkerType.horizontalLine
// ),
// dataLabelSettings: DataLabelSettings(
// // Renders the data label
// isVisible: true),
dataSource: chartData,
xValueMapper: (ChartData tendencias, _) => tendencias.x,
yValueMapper: (ChartData tendencias, _) => tendencias.y,
)
],
annotations: [
CartesianChartAnnotation(
widget: Container(
height: 1.0,
width: 200,
color: Colors.black,
),
coordinateUnit: CoordinateUnit.point,
x: DateTime(2018, 1, 10),
y: 20,
horizontalAlignment: ChartAlignment.near,
)
],
trackballBehavior: TrackballBehavior(
enable: true,
lineType: TrackballLineType.horizontal,
tooltipSettings:
InteractiveTooltip(enable: true, color: Colors.red)),
),
);
}
}
dynamic getData() {
List data = [];
for (int i = 1; i < 35; i++) {
data.add(ChartData(DateTime(2018, 1, i), getRandomInt(10, 100).toInt()));
}
return data;
}
num getRandomInt(num min, num max) {
final Random random = Random();
return min + random.nextInt((max - min).toInt());
}
class ChartData {
ChartData(this.x, this.y);
final DateTime x;
final int y;
}
I tried to draw it with a container with some width but as you see it does not work.
ANSWER
Answered 2021-Nov-08 at 14:41
We recommend using the PlotBand feature available in our chart widget to render a horizontal line for y-points in the chart. We have also attached a simple chart sample below, in which we have rendered a horizontal line using the PlotBand feature.
SfCartesianChart(
primaryXAxis: DateTimeAxis(),
primaryYAxis: NumericAxis(
plotBands: [
PlotBand(
start: 32, // y-point at which the horizontal line needs to be drawn.
end: 32,
borderColor: Colors.red,
borderWidth: 2, // set the border width for the horizontal line.
associatedAxisStart: DateTime(2002) // set the required value for the associated x-axis start property
)
]
),
)
Sample: https://www.syncfusion.com/downloads/support/directtrac/347691/ze/f170191_plotband-902619107
For further reference on the Plotband feature, please check the user guide below. https://help.syncfusion.com/flutter/cartesian-charts/axis-customization#plot-line
Also, if you need to render the line on tapping the chart, you can use the pixelToPoint method to achieve it. Please find the sample link below for further reference. https://flutter.syncfusion.com/#/cartesian-charts/user-interactions/add-a-point-on-click
QUESTION
I am trying to plot a graph and have the color of the vertices be based on either the vertex ID or the name. I use a dataset with an adjacency matrix with column names. However, even with hours of searching I am clueless how I should color these nodes. The names of the columns are:
"Mohamed.Atta", "Marwan.Al.Shehhi", "Ziad.Jarrah", "Said.Bahaji", "Ramzi.Bin.al.Shibh", and the indices: 15, 13, 25, 28, 14
My code:
library(statnet)
library(intergraph)
###Load dataset
Initial.matrix <- read.csv("C:/Users/marie/Downloads/9_11_HIJACKERS_ASSOCIATES1.csv",
header=TRUE)
matrix <- as.matrix(Initial.matrix)
associates <- graph.adjacency(matrix, mode="undirected", weighted=NULL)
##plot matrix
plot(associates, frame=T, main="9/11 hijacker associates",
vertex.label = NA, vertex.size = 10, edge.width = 1.5)
##attempt 1: making a new column, which did not even work.
V(associates)$primesuspect <- c(0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0
,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
,0,0)
##attempt 2: try to access the names directly, did not work.
colors <- c("cyan", "tomato")
V(associates)
V(associates)$color <- V(associates)
V(associates)$color <- gsub("Mohamed.Atta", colors[0], V(associates)$color)
this is the matrix:
> dput(Initial.matrix)
structure(list(ï..Majed.Moqed = c(0L, 0L, 1L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L), Khalid.Al.Mihdhar = c(0L, 0L, 1L, 1L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), Hani.Hanjour = c(1L, 1L,
0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), Nawaf.Alhazmi = c(0L,
1L, 1L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), Salem.Alhazmi. = c(0L,
0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), Ahmed.Alnami = c(0L,
0L, 0L, 1L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), Ahmed.Alghamdi = c(0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), Saeed.Alghamdi. = c(0L,
0L, 0L, 1L, 0L, 1L, 0L, 0L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), Hamza.Alghamdi = c(0L,
0L, 0L, 1L, 0L, 1L, 1L, 0L, 0L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), Ahmed.Al.Haznawi = c(0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), Mohand.Alshehri. = c(0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), Fayez.Ahmed = c(0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 1L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), Ziad.Jarrah = c(0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 1L, 1L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 0L, 0L, 1L, 1L, 0L, 1L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), Marwan.Al.Shehhi = c(0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 0L, 1L, 1L, 0L,
0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 0L, 1L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), Mohamed.Atta = c(0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 0L, 1L, 0L,
0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), Abdul.Aziz.Al.Omari. = c(0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 0L, 1L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), Waleed.Alshehri = c(0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L,
1L, 1L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), Wail.Alshehri = c(0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L,
0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), Satam.Suqami = c(0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L,
1L, 0L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), Raed.Hijazi = c(0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), Nabil.al.Marabh = c(0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), Mustafa.Ahamend.al.Hisawi = c(0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), Mamoun.Darkazanli = c(0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), Zakariya.Essabar = c(0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), Said.Bahaji = c(0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 1L, 1L, 0L, 1L, 0L, 1L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), Mounir.El.Motassadeq = c(0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), Zacarias.Moussaoui = c(0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L), Ramzi.Bin.al.Shibh = c(0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 0L, 1L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), Agus.Budiman = c(0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), Ahed.Khalil.Ibrahim.Samir.Al.Ani = c(0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), Lofti.Raissi = c(0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), Rayed.Mohammed.Abdullah = c(0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L,
1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), Bandar.Alhazmi = c(0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), Faisal.Al.Salmi = c(0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), Osama.Awadallah = c(0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), Abdussattar.Shaikh = c(0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), Mohamed.Abdi = c(0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), Mohamed.Belfas = c(0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), Imad.Eddin.Baraat.Yarkas = c(0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L), Tarek.Maaroufi = c(0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 1L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 1L, 0L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L), Abu.Qatada = c(0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 1L, 1L, 0L, 1L, 0L, 0L, 0L, 1L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 1L), Djamal.Benghal = c(0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 1L, 0L, 0L, 1L, 1L, 1L, 1L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L), Jerome.Courtaillier = c(0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 1L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L), David.Courtaillier = c(0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L), Ahmen.Ressam = c(0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, NA, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), Abu.Walid = c(0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L), Jean.Marc.Grandvisir = c(0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), Abu.Zubeida = c(0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), Nizar.Trabelsi = c(0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), Haydar.Abu.Doha = c(0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L,
0L, 1L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), Mehdi.Khammoun = c(0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 1L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), Mohammed.Bensakhria = c(0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 1L, 0L, 1L, 1L, 0L, 0L, 1L, 0L, 0L, 0L, 0L), Lased.Ben.Heni = c(0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 1L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), Essid.Sami.Ben.Khemail = c(0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 0L), Seifallah.ben.Hassine = c(0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 1L, 0L, 0L, 1L, 0L, 0L, 0L, 0L), Essoussi.Laaroussi = c(0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 1L, 0L, 0L, 1L, 0L, 0L, 0L, 0L), Tarek.Maaroufi.1 = c(0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), Fahid.al.Shakri = c(0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), Madjid.Sahoune = c(0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
1L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), Samir.Kishk = c(0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), Kamel.Daoudi = c(0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 1L, 0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L)), class = "data.frame", row.names = c(NA,
-61L))
ANSWER
Answered 2021-Oct-13 at 08:05
You can try the code below. The expression names(V(associates)) %in% c(...) returns a logical vector that is TRUE for the listed names; adding 1 turns it into indices 1 or 2 into c("cyan", "tomato"), so those vertices are colored "tomato" and all others "cyan".
plot(
associates,
frame = TRUE,
main = "9/11 hijacker associates",
vertex.color = c("cyan", "tomato")[1 + names(V(associates)) %in% c("Mohamed.Atta", "Marwan.Al.Shehhi", "Ziad.Jarrah", "Said.Bahaji", "Ramzi.Bin.al.Shibh")],
vertex.label = NA,
vertex.size = 10,
edge.width = 1.5
)
QUESTION
I'm currently taking an Introduction to React course on FullStackOpen and I'm stuck on the 1.14 anecdotes exercise, which requires the application to display the anecdote with the largest number of votes. The browser is able to render the Mostvote component, which renders the votes of the most voted anecdote, but the most voted anecdote (bestAnecdote) can't be displayed no matter what I do. Does anyone know where I went wrong? Thank you in advance :) Below I have attached my React code:
import React, { useState } from 'react'
const Header = (props) => {
return {props.contents}
}
const Button = (props) => (
{props.text}
)
const Vote = ({vote}) => (
has {vote} votes
)
const App = () => {
const contents = {
text1: "Anecdote of the day",
text2: "Anecdote with most votes"
}
const anecdotes = [
'If it hurts, do it more often',
'Adding manpower to a late software project makes it later!',
'The first 90 percent of the code accounts for the first 10 percent of the development time...The remaining 10 percent of the code accounts for the other 90 percent of the development time.',
'Any fool can write code that a computer can understand. Good programmers write code that humans can understand.',
'Premature optimization is the root of all evil.',
'Debugging is twice as hard as writing the code in the first place. Therefore, if you write the code as cleverly as possible, you are, by definition, not smart enough to debug it.',
'Programming without an extremely heavy use of console.log is same as if a doctor would refuse to use x-rays or blood tests when diagnosing patients'
]
const random = () => (
Math.floor(Math.random() * 7)
)
const [selected, setSelected] = useState(0)
const [vote, setVote] = useState(new Array(anecdotes.length).fill(0))
const Anecdotevoting = () => {
const copy = [...vote];
copy[selected] += 1;
setVote(copy);
}
const Mostvote = () => (
has {Math.max(...vote)} votes
)
const bestAnecdote = anecdotes[vote.indexOf(Mostvote)];
console.log(bestAnecdote)
return (
{anecdotes[selected]}
setSelected(random)} text = 'next anecdote'/>
)
}
export default App
ANSWER
Answered 2021-Oct-23 at 04:23
Nick hit most of the issues. Here is a working version you can use to see where the issues are:
import React, { useState } from 'react'
const Header = (props) => {
return {props.contents}
}
const Button = (props) => (
{props.text}
)
const Vote = ({vote}) => (
has {vote} votes
)
const BestAnecdote = (props) => {
return {props.anecdotes}
}
const App = () => {
const contents = {
text1: "Anecdote of the day",
text2: "Anecdote with most votes"
}
const anecdotes = [
'If it hurts, do it more often',
'Adding manpower to a late software project makes it later!',
'The first 90 percent of the code accounts for the first 10 percent of the development time...The remaining 10 percent of the code accounts for the other 90 percent of the development time.',
'Any fool can write code that a computer can understand. Good programmers write code that humans can understand.',
'Premature optimization is the root of all evil.',
'Debugging is twice as hard as writing the code in the first place. Therefore, if you write the code as cleverly as possible, you are, by definition, not smart enough to debug it.',
'Programming without an extremely heavy use of console.log is same as if a doctor would refuse to use x-rays or blood tests when diagnosing patients'
]
const random = () => (
Math.floor(Math.random() * 7)
)
const [selected, setSelected] = useState(0)
const [vote, setVote] = useState(new Array(anecdotes.length).fill(0))
const Anecdotevoting = () => {
const copy = [...vote];
copy[selected] += 1;
setVote(copy);
}
const Mostvote = () => (
has {Math.max(...vote)} votes
)
return (
{anecdotes[selected]}
setSelected(random)} text = 'next anecdote'/>
)
}
export default App
Community Discussions, Code Snippets contain sources that include Stack Exchange Network
Vulnerabilities
No vulnerabilities reported
Install ray
You can use ray like any standard Python library. You will need to make sure that you have a development environment consisting of a Python distribution including header files, a compiler, pip, and git installed. Make sure that your pip, setuptools, and wheel are up to date. When using pip it is generally recommended to install packages in a virtual environment to avoid changes to the system.
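A minimal sketch of that flow (the package name on PyPI is ray; the smoke test below uses only core Ray calls, ray.init, @ray.remote, and ray.get; exact shell commands may vary by platform):
# In a fresh virtual environment, something like:
#   python -m venv .venv && source .venv/bin/activate
#   pip install --upgrade pip setuptools wheel
#   pip install ray
import ray

ray.init()  # start a local Ray instance

@ray.remote
def square(x):
    return x * x

# Run a few tasks in parallel and gather the results.
print(ray.get([square.remote(i) for i in range(4)]))  # [0, 1, 4, 9]
ray.shutdown()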
Support
Find, review, and download reusable Libraries, Code Snippets, Cloud APIs from over 650 million Knowledge Items